/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK	0xFFFFFFFFL

const int link_speed[] = {25, 50, 80, 160, 320, 640};

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...)			\
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?			\
			     (smu)->ppt_funcs->intf(smu, ##args) :	\
			     -ENOTSUPP) :				\
			    -EINVAL)
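
/*
 * Illustrative sketch (not driver code): a caller of
 * smu_cmn_call_asic_func() can tell a missing ppt_funcs table (-EINVAL)
 * apart from an ASIC that simply does not implement the callback
 * (-ENOTSUPP). The get_enabled_mask interface used further below is a
 * real example; the wrapper itself is hypothetical:
 *
 *	static int example_get_enabled(struct smu_context *smu,
 *				       struct smu_feature_bits *bits)
 *	{
 *		int ret = smu_cmn_call_asic_func(get_enabled_mask, smu, bits);
 *
 *		if (ret == -ENOTSUPP)
 *			return 0;	(callback absent on this ASIC)
 *		return ret;
 *	}
 */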

#define SMU_MSG_V1_DEFAULT_RATELIMIT_INTERVAL	(5 * HZ)
#define SMU_MSG_V1_DEFAULT_RATELIMIT_BURST	10

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * once the SMU exports a unified header file containing these
 * macros, which we can then include and use directly. At the moment,
 * these error codes are unfortunately defined per ASIC by the SMU,
 * yet we are one driver for all ASICs.
 */
#define SMU_RESP_NONE		0
#define SMU_RESP_OK		1
#define SMU_RESP_CMD_FAIL	0xFF
#define SMU_RESP_CMD_UNKNOWN	0xFE
#define SMU_RESP_CMD_BAD_PREREQ	0xFD
#define SMU_RESP_BUSY_OTHER	0xFC
#define SMU_RESP_DEBUG_END	0xFB

#define SMU_RESP_UNEXP		(~0U)

static int smu_msg_v1_send_debug_msg(struct smu_msg_ctl *ctl, u32 msg, u32 param)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;

	if (!(ctl->flags & SMU_MSG_CTL_DEBUG_MAILBOX))
		return -EOPNOTSUPP;

	mutex_lock(&ctl->lock);

	WREG32(cfg->debug_param_reg, param);
	WREG32(cfg->debug_msg_reg, msg);
	WREG32(cfg->debug_resp_reg, 0);

	mutex_unlock(&ctl->lock);

	return 0;
}

static int __smu_cmn_send_debug_msg(struct smu_msg_ctl *ctl,
				    u32 msg,
				    u32 param)
{
	if (!ctl->ops || !ctl->ops->send_debug_msg)
		return -EOPNOTSUPP;

	return ctl->ops->send_debug_msg(ctl, msg, param);
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * smu_msg_v1_decode_response() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	return smu_msg_wait_response(&smu->msg_ctl, 0);
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return a value from the SMU back to
 * the caller in the @read_arg pointer.
 *
 * Return 0 on success, -errno when a problem is encountered while
 * sending the message or receiving the reply. If there is a PCI bus
 * recovery or the destination is a virtual GPU which does not allow
 * this message type, the message is simply dropped and success is also
 * returned. See smu_msg_v1_decode_response() for details of the
 * -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The value in @read_arg is read back regardless, to give back more
 * information to the client, which on error would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct smu_msg_ctl *ctl = &smu->msg_ctl;
	struct smu_msg_args args = {
		.msg = msg,
		.args[0] = param,
		.num_args = 1,
		.num_out_args = read_arg ? 1 : 0,
		.flags = 0,
		.timeout = 0,
	};
	int ret;

	ret = ctl->ops->send_msg(ctl, &args);

	if (read_arg)
		*read_arg = args.out_args[0];

	return ret;
}
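
/*
 * Usage sketch for smu_cmn_send_smc_msg_with_param() (illustrative;
 * assumes a valid @smu and that the ASIC maps SMU_MSG_GetSmuVersion):
 *
 *	uint32_t smu_version;
 *	int err;
 *
 *	err = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetSmuVersion,
 *					      0, &smu_version);
 *	if (err)
 *		dev_err(smu->adev->dev, "GetSmuVersion failed: %d\n", err);
 */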

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg)
{
	return __smu_cmn_send_debug_msg(&smu->msg_ctl, msg, 0);
}

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param)
{
	return __smu_cmn_send_debug_msg(&smu->msg_ctl, msg, param);
}

static int smu_msg_v1_decode_response(u32 resp)
{
	int res;

	switch (resp) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}
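
/*
 * Hedged sketch of how a caller might act on the decoded -errno values
 * above; the 10 us retry interval follows the SMU_RESP_BUSY_OTHER
 * comment. This retry loop is hypothetical, not part of the driver:
 *
 *	do {
 *		ret = smu_msg_v1_decode_response(RREG32(cfg->resp_reg));
 *		if (ret != -EBUSY)
 *			break;
 *		udelay(10);
 *	} while (--retries);
 */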

static u32 __smu_msg_v1_poll_stat(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;
	u32 timeout = timeout_us ? timeout_us : ctl->default_timeout;
	u32 reg;

	for (; timeout > 0; timeout--) {
		reg = RREG32(cfg->resp_reg);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	return reg;
}

static void __smu_msg_v1_send(struct smu_msg_ctl *ctl, u16 index,
			      struct smu_msg_args *args)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;
	int i;

	WREG32(cfg->resp_reg, 0);
	for (i = 0; i < args->num_args; i++)
		WREG32(cfg->arg_regs[i], args->args[i]);
	WREG32(cfg->msg_reg, index);
}

static void __smu_msg_v1_read_out_args(struct smu_msg_ctl *ctl,
				       struct smu_msg_args *args)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	int i;

	for (i = 0; i < args->num_out_args; i++)
		args->out_args[i] = RREG32(ctl->config.arg_regs[i]);
}

static void __smu_msg_v1_print_err_limited(struct smu_msg_ctl *ctl,
					   struct smu_msg_args *args,
					   char *err_msg)
{
	static DEFINE_RATELIMIT_STATE(_rs,
				      SMU_MSG_V1_DEFAULT_RATELIMIT_INTERVAL,
				      SMU_MSG_V1_DEFAULT_RATELIMIT_BURST);
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;

	if (__ratelimit(&_rs)) {
		u32 in[SMU_MSG_MAX_ARGS];
		int i;

		dev_err(adev->dev, "%s msg_reg: %x resp_reg: %x", err_msg,
			RREG32(ctl->config.msg_reg),
			RREG32(ctl->config.resp_reg));
		if (args->num_args > 0) {
			for (i = 0; i < args->num_args; i++)
				in[i] = RREG32(ctl->config.arg_regs[i]);
			print_hex_dump(KERN_ERR, "in params:", DUMP_PREFIX_NONE,
				       16, 4, in, args->num_args * sizeof(u32),
				       false);
		}
	}
}

static void __smu_msg_v1_print_error(struct smu_msg_ctl *ctl,
				     u32 resp,
				     struct smu_msg_args *args)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	int index = ctl->message_map[args->msg].map_to;

	switch (resp) {
	case SMU_RESP_NONE:
		__smu_msg_v1_print_err_limited(ctl, args, "SMU: No response");
		break;
	case SMU_RESP_OK:
		break;
	case SMU_RESP_CMD_FAIL:
		break;
	case SMU_RESP_CMD_UNKNOWN:
		__smu_msg_v1_print_err_limited(ctl, args,
					       "SMU: unknown command");
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		__smu_msg_v1_print_err_limited(
			ctl, args, "SMU: valid command, bad prerequisites");
		break;
	case SMU_RESP_BUSY_OTHER:
		if (args->msg != SMU_MSG_GetBadPageCount)
			__smu_msg_v1_print_err_limited(ctl, args,
						       "SMU: I'm very busy");
		break;
	case SMU_RESP_DEBUG_END:
		__smu_msg_v1_print_err_limited(ctl, args, "SMU: Debug Err");
		break;
	case SMU_RESP_UNEXP:
		if (amdgpu_device_bus_status_check(adev)) {
			dev_err(adev->dev,
				"SMU: bus error for message: %s(%d) response:0x%08X ",
				smu_get_message_name(smu, args->msg), index,
				resp);
			if (args->num_args > 0)
				print_hex_dump(KERN_ERR,
					       "in params:", DUMP_PREFIX_NONE,
					       16, 4, args->args,
					       args->num_args * sizeof(u32),
					       false);
		}
		break;
	default:
		__smu_msg_v1_print_err_limited(ctl, args,
					       "SMU: unknown response");
		break;
	}
}

static int __smu_msg_v1_ras_filter(struct smu_msg_ctl *ctl,
				   enum smu_message_type msg, u32 msg_flags,
				   bool *skip_pre_poll)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	bool fed_status;
	u32 reg;

	if (!(smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI))
		return 0;

	fed_status = amdgpu_ras_get_fed_status(adev);

	/* Block non-RAS-priority messages during RAS error */
	if (fed_status && !(msg_flags & SMU_MSG_RAS_PRI)) {
		dev_dbg(adev->dev, "RAS error detected, skip sending %s",
			smu_get_message_name(smu, msg));
		return -EACCES;
	}

	/* Skip pre-poll for priority messages or during RAS error */
	if ((msg_flags & SMU_MSG_NO_PRECHECK) || fed_status) {
		reg = RREG32(ctl->config.resp_reg);
		dev_dbg(adev->dev,
			"Sending priority message %s response status: %x",
			smu_get_message_name(smu, msg), reg);
		if (reg == 0)
			*skip_pre_poll = true;
	}

	return 0;
}

/**
 * smu_msg_v1_send_msg - Complete V1 protocol with all filtering
 * @ctl: Message control block
 * @args: Message arguments
 *
 * Return: 0 on success, negative errno on failure
 */
static int smu_msg_v1_send_msg(struct smu_msg_ctl *ctl,
			       struct smu_msg_args *args)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	const struct cmn2asic_msg_mapping *mapping;
	u32 reg, msg_flags;
	int ret, index;
	bool skip_pre_poll = false;
	bool lock_held = args->flags & SMU_MSG_FLAG_LOCK_HELD;

	/* Early exit if no HW access */
	if (adev->no_hw_access)
		return 0;

	/* Message index translation */
	if (args->msg >= SMU_MSG_MAX_COUNT || !ctl->message_map)
		return -EINVAL;

	if (args->num_args > ctl->config.num_arg_regs ||
	    args->num_out_args > ctl->config.num_arg_regs)
		return -EINVAL;

	mapping = &ctl->message_map[args->msg];
	if (!mapping->valid_mapping)
		return -EINVAL;

	msg_flags = mapping->flags;
	index = mapping->map_to;

	/* VF filter - skip messages not valid for VF */
	if (amdgpu_sriov_vf(adev) && !(msg_flags & SMU_MSG_VF_FLAG))
		return 0;

	if (!lock_held)
		mutex_lock(&ctl->lock);

	/* RAS priority filter */
	ret = __smu_msg_v1_ras_filter(ctl, args->msg, msg_flags,
				      &skip_pre_poll);
	if (ret)
		goto out;

	/* FW state checks */
	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev,
			"SMU is in a hung state, failed to send smu message!\n");
		ret = -EREMOTEIO;
		goto out;
	} else if (smu->smc_fw_state == SMU_FW_INIT) {
		skip_pre_poll = true;
		smu->smc_fw_state = SMU_FW_RUNTIME;
	}

	/* Pre-poll: ensure previous message completed */
	if (!skip_pre_poll) {
		reg = __smu_msg_v1_poll_stat(ctl, args->timeout);
		ret = smu_msg_v1_decode_response(reg);
		if (reg == SMU_RESP_NONE || ret == -EREMOTEIO) {
			__smu_msg_v1_print_error(ctl, reg, args);
			goto out;
		}
	}

	/* Send message */
	__smu_msg_v1_send(ctl, (u16)index, args);

	/* Post-poll (skip if ASYNC) */
	if (args->flags & SMU_MSG_FLAG_ASYNC) {
		ret = 0;
		goto out;
	}

	reg = __smu_msg_v1_poll_stat(ctl, args->timeout);
	ret = smu_msg_v1_decode_response(reg);

	/* FW state update on fatal error */
	if (ret == -EREMOTEIO) {
		smu->smc_fw_state = SMU_FW_HANG;
		__smu_msg_v1_print_error(ctl, reg, args);
	} else if (ret != 0) {
		__smu_msg_v1_print_error(ctl, reg, args);
	}

	/* Read output args */
	if ((ret == 0 || (args->flags & SMU_MSG_FLAG_FORCE_READ_ARG)) &&
	    args->num_out_args > 0) {
		__smu_msg_v1_read_out_args(ctl, args);
		dev_dbg(adev->dev, "smu send message: %s(%d) resp : 0x%08x",
			smu_get_message_name(smu, args->msg), index, reg);
		if (args->num_args > 0)
			print_hex_dump_debug("in params:", DUMP_PREFIX_NONE, 16,
					     4, args->args,
					     args->num_args * sizeof(u32),
					     false);
		print_hex_dump_debug("out params:", DUMP_PREFIX_NONE, 16, 4,
				     args->out_args,
				     args->num_out_args * sizeof(u32), false);
	} else {
		dev_dbg(adev->dev, "smu send message: %s(%d), resp: 0x%08x\n",
			smu_get_message_name(smu, args->msg), index, reg);
		if (args->num_args > 0)
			print_hex_dump_debug("in params:", DUMP_PREFIX_NONE, 16,
					     4, args->args,
					     args->num_args * sizeof(u32),
					     false);
	}

out:
	/* Debug halt on error */
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    ret) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	if (!lock_held)
		mutex_unlock(&ctl->lock);
	return ret;
}

static int smu_msg_v1_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int ret;

	reg = __smu_msg_v1_poll_stat(ctl, timeout_us);
	ret = smu_msg_v1_decode_response(reg);

	if (ret == -EREMOTEIO)
		smu->smc_fw_state = SMU_FW_HANG;

	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    ret && (ret != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return ret;
}

const struct smu_msg_ops smu_msg_v1_ops = {
	.send_msg = smu_msg_v1_send_msg,
	.wait_response = smu_msg_v1_wait_response,
	.decode_response = smu_msg_v1_decode_response,
	.send_debug_msg = smu_msg_v1_send_debug_msg,
};

int smu_msg_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	return ctl->ops->wait_response(ctl, timeout_us);
}

/**
 * smu_msg_send_async_locked - Send a message asynchronously, caller holds lock
 * @ctl: Message control block
 * @msg: Message type
 * @param: Message parameter
 *
 * Send an SMU message without waiting for the response. The caller must hold
 * ctl->lock and call smu_msg_wait_response() later to get the result.
 *
 * Return: 0 on success, negative errno on failure
 */
int smu_msg_send_async_locked(struct smu_msg_ctl *ctl,
			      enum smu_message_type msg, u32 param)
{
	struct smu_msg_args args = {
		.msg = msg,
		.args[0] = param,
		.num_args = 1,
		.num_out_args = 0,
		.flags = SMU_MSG_FLAG_ASYNC | SMU_MSG_FLAG_LOCK_HELD,
		.timeout = 0,
	};

	return ctl->ops->send_msg(ctl, &args);
}
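
/*
 * Typical split-transaction pattern (illustrative sketch; assumes the
 * caller owns ctl->lock across the whole sequence and supplies a valid
 * message "msg" with parameter "param"):
 *
 *	mutex_lock(&ctl->lock);
 *	ret = smu_msg_send_async_locked(ctl, msg, param);
 *	if (!ret) {
 *		(do other work while the SMU executes)
 *		ret = smu_msg_wait_response(ctl, 0);
 *	}
 *	mutex_unlock(&ctl->lock);
 */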

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->msg_ctl.message_map)
			return -EINVAL;

		msg_mapping = smu->msg_ctl.message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !(msg_mapping.flags & SMU_MSG_VF_FLAG))
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -ENOTSUPP;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}
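
/*
 * Example lookup (illustrative): translate the common feature enum to
 * this ASIC's feature bit before touching a feature mask. A negative
 * return means "no mapping on this ASIC" and must be checked first:
 *
 *	int bit = smu_cmn_to_asic_specific_index(smu,
 *						 CMN2ASIC_MAPPING_FEATURE,
 *						 SMU_FEATURE_DPM_UCLK_BIT);
 *	if (bit < 0)
 *		return bit;
 */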

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return smu_feature_list_is_set(smu, SMU_FEATURE_LIST_SUPPORTED,
				       feature_id);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      struct smu_feature_bits *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_feature_bits enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * Renoir and Cyan Skillfish are assumed to have all features
	 * enabled. Since they also come with no feature_map, this check
	 * avoids the otherwise unwanted feature_map lookup below.
	 */
	if (smu_feature_bits_full(&enabled_features,
				  smu->smu_feature.feature_num))
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return smu_feature_bits_is_set(&enabled_features, feature_id);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
		break;
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     struct smu_feature_bits *feature_mask)
{
	uint32_t features[2];
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(
			smu, SMU_MSG_GetEnabledSmuFeatures, 0, &features[0]);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(
			smu, SMU_MSG_GetEnabledSmuFeatures, 1, &features[1]);
	} else {
		ret = smu_cmn_send_smc_msg(
			smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &features[1]);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(
			smu, SMU_MSG_GetEnabledSmuFeaturesLow, &features[0]);
	}

	if (!ret)
		smu_feature_bits_from_arr32(feature_mask, features,
					    SMU_FEATURE_NUM_DEFAULT);

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}
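
/*
 * Worked example with a hypothetical map: if throttler_map[3] == 40,
 * then a dependent status of BIT(3) translates to an independent
 * status of BIT_ULL(40):
 *
 *	dep_status = BIT(3);
 *	indep = smu_cmn_get_indep_throttler_status(dep_status, map);
 *	(indep now equals BIT_ULL(40))
 */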

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	int16_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
	struct smu_feature_bits feature_mask;
	uint32_t features[2];
	int i, feature_index;
	uint32_t count = 0;
	size_t size = 0;

	if (__smu_get_enabled_features(smu, &feature_mask))
		return 0;

	/* TBD: Need to handle for > 64 bits */
	smu_feature_bits_to_arr32(&feature_mask, features, 64);
	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     features[1], features[0]);

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (feature_index = 0; feature_index < smu->smu_feature.feature_num;
	     feature_index++) {
		if (sort_feature[feature_index] < 0)
			continue;

		size += sysfs_emit_at(
			buf, size, "%02d. %-20s (%2d) : %s\n", count++,
			smu_get_feature_name(smu, sort_feature[feature_index]),
			feature_index,
			smu_feature_bits_is_set(&feature_mask, feature_index) ?
				"enabled" :
				"disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	struct smu_feature_bits feature_mask;
	uint64_t feature_mask_u64;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_mask_u64 = *(uint64_t *)feature_mask.bits;
	feature_2_enabled = ~feature_mask_u64 & new_mask;
	feature_2_disabled = feature_mask_u64 & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all DPM features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:  smu_context pointer
 * @mask: the DPM feature which should not be disabled; pass
 *        SMU_FEATURE_COUNT for no exception, i.e. all DPM features
 *        to disable
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}
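
/*
 * Usage sketch (illustrative; whether and when an ASIC does this is
 * ASIC-specific policy): disable every DPM feature except BACO, e.g.
 * ahead of a reset, assuming SMU_FEATURE_BACO_BIT maps on this ASIC:
 *
 *	ret = smu_cmn_disable_all_features_with_exception(smu,
 *							  SMU_FEATURE_BACO_BIT);
 */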

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	adev->pm.fw_version = smu_version;

	dev_info_once(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
		      "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
		      smu->smc_driver_if_version, if_version,
		      smu_program, smu_version, smu_major, smu_minor, smu_debug);

	return 0;
}

int smu_cmn_update_table_read_arg(struct smu_context *smu,
				  enum smu_table_id table_index,
				  int argument,
				  void *table_data,
				  uint32_t *read_arg,
				  bool drv2smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	struct smu_msg_ctl *ctl = &smu->msg_ctl;
	struct smu_msg_args args;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache to guarantee that the content
		 * seen by the GPU is consistent with the CPU.
		 */
		amdgpu_hdp_flush(adev, NULL);
	}

	args.msg = drv2smu ? SMU_MSG_TransferTableDram2Smu : SMU_MSG_TransferTableSmu2Dram;
	args.args[0] = ((argument & 0xFFFF) << 16) | (table_id & 0xffff);
	args.num_args = 1;
	args.out_args[0] = 0;
	args.num_out_args = read_arg ? 1 : 0;
	args.flags = read_arg ? SMU_MSG_FLAG_FORCE_READ_ARG : 0;
	args.timeout = 0;

	ret = ctl->ops->send_msg(ctl, &args);

	if (read_arg)
		*read_arg = args.out_args[0];

	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_hdp_invalidate(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_vram_cpy(struct smu_context *smu, void *dst, const void *src,
		     size_t len)
{
	memcpy(dst, src, len);

	/* Don't trust the copy operation if a RAS fatal error happened. */
	if (amdgpu_ras_get_fed_status(smu->adev))
		return -EHWPOISON;

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}
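
/*
 * The 1 ms cache above means back-to-back sensor queries reuse the last
 * fetched table. A caller needing guaranteed-fresh data passes
 * bypass_cache = true (sketch; SmuMetrics_t stands in for the per-ASIC
 * metrics structure):
 *
 *	SmuMetrics_t metrics;
 *
 *	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
 */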

int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.combo_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_COMBO_PPTABLE,
				    0,
				    pptable,
				    false);
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}

static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case SOC_PSTATE_DEFAULT:
		return "soc_pstate_default";
	case SOC_PSTATE_0:
		return "soc_pstate_0";
	case SOC_PSTATE_1:
		return "soc_pstate_1";
	case SOC_PSTATE_2:
		return "soc_pstate_2";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc pstate_policy_desc = {
	.name = STR_SOC_PSTATE_POLICY,
	.get_desc = smu_soc_policy_get_desc,
};

void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &pstate_policy_desc;
}

static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
					   int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case XGMI_PLPD_DISALLOW:
		return "plpd_disallow";
	case XGMI_PLPD_DEFAULT:
		return "plpd_default";
	case XGMI_PLPD_OPTIMIZED:
		return "plpd_optimized";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
	.name = STR_XGMI_PLPD_POLICY,
	.get_desc = smu_xgmi_plpd_policy_get_desc,
};

void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &xgmi_plpd_policy_desc;
}

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask)
{
	int workload_type;
	u32 profile_mode;

	*backend_workload_mask = 0;

	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
		if (!(workload_mask & (1 << profile_mode)))
			continue;

		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       profile_mode);

		if (workload_type < 0)
			continue;

		*backend_workload_mask |= 1 << workload_type;
	}
}

void smu_cmn_reset_custom_level(struct smu_context *smu)
{
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;

	pstate_table->gfxclk_pstate.custom.min = 0;
	pstate_table->gfxclk_pstate.custom.max = 0;
	pstate_table->uclk_pstate.custom.min = 0;
	pstate_table->uclk_pstate.custom.max = 0;
}

static inline bool smu_cmn_freqs_match(uint32_t freq1, uint32_t freq2)
{
	/* Frequencies within 25 MHz are considered equal */
	return (abs((int)freq1 - (int)freq2) <= 25);
}

int smu_cmn_print_dpm_clk_levels(struct smu_context *smu,
				 struct smu_dpm_table *dpm_table,
				 uint32_t cur_clk, char *buf, int *offset)
{
	uint32_t min_clk, max_clk, level_index, count;
	uint32_t freq_values[3];
	int size, lvl, i;
	bool is_fine_grained;
	bool is_deep_sleep;
	bool freq_match;

	if (!dpm_table || !buf)
		return -EINVAL;

	level_index = 0;
	size = *offset;
	count = dpm_table->count;
	is_fine_grained = dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED;
	min_clk = SMU_DPM_TABLE_MIN(dpm_table);
	max_clk = SMU_DPM_TABLE_MAX(dpm_table);

	/* Deep sleep - current clock < min_clock/2, TBD: cur_clk = 0 as GFXOFF */
	is_deep_sleep = cur_clk < min_clk / 2;
	if (is_deep_sleep) {
		size += sysfs_emit_at(buf, size, "S: %uMhz *\n", cur_clk);
		level_index = 1;
	}

	if (!is_fine_grained || count == 1) {
		for (i = 0; i < count; i++) {
			freq_match = !is_deep_sleep &&
				     smu_cmn_freqs_match(
					     cur_clk,
					     dpm_table->dpm_levels[i].value);
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					      level_index + i,
					      dpm_table->dpm_levels[i].value,
					      freq_match ? "*" : "");
		}
	} else {
		count = 2;
		freq_values[0] = min_clk;
		freq_values[1] = max_clk;

		if (!is_deep_sleep) {
			if (smu_cmn_freqs_match(cur_clk, min_clk)) {
				lvl = 0;
			} else if (smu_cmn_freqs_match(cur_clk, max_clk)) {
				lvl = 1;
			} else {
				/* NOTE: use index '1' to show current clock value */
				lvl = 1;
				count = 3;
				freq_values[1] = cur_clk;
				freq_values[2] = max_clk;
			}
		}

		for (i = 0; i < count; i++) {
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", level_index + i,
				freq_values[i],
				(!is_deep_sleep && i == lvl) ? "*" : "");
		}
	}

	*offset = size;

	return 0;
}

int smu_cmn_print_pcie_levels(struct smu_context *smu,
			      struct smu_pcie_table *pcie_table,
			      uint32_t cur_gen, uint32_t cur_lane, char *buf,
			      int *offset)
{
	int size, i;

	if (!pcie_table || !buf)
		return -EINVAL;

	size = *offset;

	for (i = 0; i < pcie_table->lclk_levels; i++) {
		size += sysfs_emit_at(
			buf, size, "%d: %s %s %dMhz %s\n", i,
			(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
			(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
			(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
			(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
			(pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," :
			(pcie_table->pcie_gen[i] == 5) ? "64.0GT/s," :
			"",
			(pcie_table->pcie_lane[i] == 1) ? "x1" :
			(pcie_table->pcie_lane[i] == 2) ? "x2" :
			(pcie_table->pcie_lane[i] == 3) ? "x4" :
			(pcie_table->pcie_lane[i] == 4) ? "x8" :
			(pcie_table->pcie_lane[i] == 5) ? "x12" :
			(pcie_table->pcie_lane[i] == 6) ? "x16" :
			(pcie_table->pcie_lane[i] == 7) ? "x32" :
			"",
			pcie_table->lclk_freq[i],
			(cur_gen == pcie_table->pcie_gen[i]) &&
					(cur_lane == pcie_table->pcie_lane[i]) ?
				"*" :
				"");
	}

	*offset = size;

	return 0;
}

int smu_cmn_dpm_pcie_gen_idx(int gen)
{
	int ret;

	switch (gen) {
	case 1 ... 5:
		ret = gen - 1;
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

int smu_cmn_dpm_pcie_width_idx(int width)
{
	int ret;

	switch (width) {
	case 1:
		ret = 1;
		break;
	case 2:
		ret = 2;
		break;
	case 4:
		ret = 3;
		break;
	case 8:
		ret = 4;
		break;
	case 12:
		ret = 5;
		break;
	case 16:
		ret = 6;
		break;
	case 32:
		ret = 7;
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}