xref: /linux/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c (revision d728fd03e5f2117853d91b3626d434a97fe896d1)
1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #define SWSMU_CODE_LAYER_L4
24 
25 #include "amdgpu.h"
26 #include "amdgpu_smu.h"
27 #include "smu_cmn.h"
28 #include "soc15_common.h"
29 
30 /*
31  * DO NOT use these for err/warn/info/debug messages.
32  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
33  * They are more MGPU friendly.
34  */
35 #undef pr_err
36 #undef pr_warn
37 #undef pr_info
38 #undef pr_debug
39 
40 #define MP1_C2PMSG_90__CONTENT_MASK                                                                    0xFFFFFFFFL
41 
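/* PCIe link speeds indexed by generation (Gen1..Gen6), expressed in
 * units of 0.1 GT/s: 2.5, 5.0, 8.0, 16, 32 and 64 GT/s.
 */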
42 const int link_speed[] = {25, 50, 80, 160, 320, 640};
43 
44 #undef __SMU_DUMMY_MAP
45 #define __SMU_DUMMY_MAP(type)	#type
46 static const char * const __smu_message_names[] = {
47 	SMU_MESSAGE_TYPES
48 };
49 
50 #define smu_cmn_call_asic_func(intf, smu, args...)                             \
51 	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?                          \
52 				     (smu)->ppt_funcs->intf(smu, ##args) :     \
53 				     -ENOTSUPP) :                              \
54 			    -EINVAL)
55 
56 static const char *smu_get_message_name(struct smu_context *smu,
57 					enum smu_message_type type)
58 {
59 	if (type >= SMU_MSG_MAX_COUNT)
60 		return "unknown smu message";
61 
62 	return __smu_message_names[type];
63 }
64 
65 static void smu_cmn_read_arg(struct smu_context *smu,
66 			     uint32_t *arg)
67 {
68 	struct amdgpu_device *adev = smu->adev;
69 
70 	*arg = RREG32(smu->param_reg);
71 }
72 
73 /* Redefine the SMU error codes here.
74  *
75  * Note that these definitions are redundant and should be removed
76  * once the SMU exports a unified header file containing these
77  * macros, which we can then simply include and use. At the moment,
78  * these error codes are unfortunately defined by the SMU per ASIC,
79  * yet this is a single driver for all ASICs.
80  */
81 #define SMU_RESP_NONE           0
82 #define SMU_RESP_OK             1
83 #define SMU_RESP_CMD_FAIL       0xFF
84 #define SMU_RESP_CMD_UNKNOWN    0xFE
85 #define SMU_RESP_CMD_BAD_PREREQ 0xFD
86 #define SMU_RESP_BUSY_OTHER     0xFC
87 #define SMU_RESP_DEBUG_END      0xFB
88 
89 #define SMU_RESP_UNEXP (~0U)
90 /**
91  * __smu_cmn_poll_stat -- poll for a status from the SMU
92  * @smu: a pointer to SMU context
93  *
94  * Returns the status of the SMU, which can be one of:
95  *    0, the SMU is busy with your command;
96  *    1, execution status: success, execution result: success;
97  * 0xFF, execution status: success, execution result: failure;
98  * 0xFE, unknown command;
99  * 0xFD, valid command, but bad (command) prerequisites;
100  * 0xFC, the command was rejected as the SMU is busy;
101  * 0xFB, "SMC_Result_DebugDataDumpEnd".
102  *
103  * Ideally, these values would come from a single header file
104  * maintained by the SMU FW team, so that we're impervious to
105  * firmware changes. At the moment they are defined in various header
106  * files, one for each ASIC (and mirrored by the SMU_RESP_* macros
107  * above), yet here we're a single ASIC-agnostic interface. Such a
108  * change can be followed up in a subsequent patch.
109  */
110 static u32 __smu_cmn_poll_stat(struct smu_context *smu)
111 {
112 	struct amdgpu_device *adev = smu->adev;
113 	int timeout = adev->usec_timeout * 20;
114 	u32 reg;
115 
116 	for ( ; timeout > 0; timeout--) {
117 		reg = RREG32(smu->resp_reg);
118 		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
119 			break;
120 
121 		udelay(1);
122 	}
123 
124 	return reg;
125 }
126 
127 static void __smu_cmn_reg_print_error(struct smu_context *smu,
128 				      u32 reg_c2pmsg_90,
129 				      int msg_index,
130 				      u32 param,
131 				      enum smu_message_type msg)
132 {
133 	struct amdgpu_device *adev = smu->adev;
134 	const char *message = smu_get_message_name(smu, msg);
135 	u32 msg_idx, prm;
136 
137 	switch (reg_c2pmsg_90) {
138 	case SMU_RESP_NONE: {
139 		msg_idx = RREG32(smu->msg_reg);
140 		prm     = RREG32(smu->param_reg);
141 		dev_err_ratelimited(adev->dev,
142 				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
143 				    msg_idx, prm);
144 		}
145 		break;
146 	case SMU_RESP_OK:
147 		/* The SMU executed the command. It completed with a
148 		 * successful result.
149 		 */
150 		break;
151 	case SMU_RESP_CMD_FAIL:
152 		/* The SMU executed the command. It completed with an
153 		 * unsuccessful result.
154 		 */
155 		break;
156 	case SMU_RESP_CMD_UNKNOWN:
157 		dev_err_ratelimited(adev->dev,
158 				    "SMU: unknown command: index:%d param:0x%08X message:%s",
159 				    msg_index, param, message);
160 		break;
161 	case SMU_RESP_CMD_BAD_PREREQ:
162 		dev_err_ratelimited(adev->dev,
163 				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
164 				    msg_index, param, message);
165 		break;
166 	case SMU_RESP_BUSY_OTHER:
167 		dev_err_ratelimited(adev->dev,
168 				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
169 				    msg_index, param, message);
170 		break;
171 	case SMU_RESP_DEBUG_END:
172 		dev_err_ratelimited(adev->dev,
173 				    "SMU: I'm debugging!");
174 		break;
175 	case SMU_RESP_UNEXP:
176 		if (amdgpu_device_bus_status_check(smu->adev)) {
177 			/* print error immediately if device is off the bus */
178 			dev_err(adev->dev,
179 				"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
180 				reg_c2pmsg_90, msg_index, param, message);
181 			break;
182 		}
183 		fallthrough;
184 	default:
185 		dev_err_ratelimited(adev->dev,
186 				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
187 				    reg_c2pmsg_90, msg_index, param, message);
188 		break;
189 	}
190 }
191 
192 static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
193 {
194 	int res;
195 
196 	switch (reg_c2pmsg_90) {
197 	case SMU_RESP_NONE:
198 		/* The SMU is busy--still executing your command.
199 		 */
200 		res = -ETIME;
201 		break;
202 	case SMU_RESP_OK:
203 		res = 0;
204 		break;
205 	case SMU_RESP_CMD_FAIL:
206 		/* The SMU executed the command, but it completed
207 		 * with an unsuccessful result.
208 		 */
209 		res = -EIO;
210 		break;
211 	case SMU_RESP_CMD_UNKNOWN:
212 		/* Unknown command--ignored by the SMU.
213 		 */
214 		res = -EOPNOTSUPP;
215 		break;
216 	case SMU_RESP_CMD_BAD_PREREQ:
217 		/* Valid command--bad prerequisites.
218 		 */
219 		res = -EINVAL;
220 		break;
221 	case SMU_RESP_BUSY_OTHER:
222 		/* The SMU is busy with other commands. The client
223 		 * should retry in 10 us.
224 		 */
225 		res = -EBUSY;
226 		break;
227 	default:
228 		/* Unknown or debug response from the SMU.
229 		 */
230 		res = -EREMOTEIO;
231 		break;
232 	}
233 
234 	return res;
235 }
236 
237 static void __smu_cmn_send_msg(struct smu_context *smu,
238 			       u16 msg,
239 			       u32 param)
240 {
241 	struct amdgpu_device *adev = smu->adev;
242 
243 	WREG32(smu->resp_reg, 0);
244 	WREG32(smu->param_reg, param);
245 	WREG32(smu->msg_reg, msg);
246 }
247 
248 static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu,
249 					       enum smu_message_type msg)
250 {
251 	return smu->message_map[msg].flags;
252 }
253 
254 static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
255 				    enum smu_message_type msg, bool *poll)
256 {
257 	struct amdgpu_device *adev = smu->adev;
258 	uint32_t flags, resp;
259 	bool fed_status, pri;
260 
261 	flags = __smu_cmn_get_msg_flags(smu, msg);
262 	*poll = true;
263 
264 	pri = !!(flags & SMU_MSG_NO_PRECHECK);
265 	/* When there is a RAS fatal error, the FW won't process messages
266 	 * that are not RAS priority. Don't allow any other messages.
267 	 */
268 	fed_status = amdgpu_ras_get_fed_status(adev);
269 	if (fed_status) {
270 		if (!(flags & SMU_MSG_RAS_PRI)) {
271 			dev_dbg(adev->dev,
272 				"RAS error detected, skip sending %s",
273 				smu_get_message_name(smu, msg));
274 			return -EACCES;
275 		}
276 	}
277 
278 	if (pri || fed_status) {
279 		/* The FW will ignore non-priority messages when a RAS fatal
280 		 * error or reset condition is detected. Hence it is possible
281 		 * that a previous message never received a response. Allow
282 		 * priority messages to continue without polling for the
283 		 * response status.
284 		 */
285 		resp = RREG32(smu->resp_reg);
286 		dev_dbg(adev->dev,
287 			"Sending priority message %s response status: %x",
288 			smu_get_message_name(smu, msg), resp);
289 		if (resp == 0)
290 			*poll = false;
291 	}
292 
293 	return 0;
294 }
295 
296 static int __smu_cmn_send_debug_msg(struct smu_context *smu,
297 			       u32 msg,
298 			       u32 param)
299 {
300 	struct amdgpu_device *adev = smu->adev;
301 
302 	WREG32(smu->debug_param_reg, param);
303 	WREG32(smu->debug_msg_reg, msg);
304 	WREG32(smu->debug_resp_reg, 0);
305 
306 	return 0;
307 }
308 /**
309  * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
310  * @smu: pointer to an SMU context
311  * @msg_index: message index
312  * @param: message parameter to send to the SMU
313  *
314  * Send a message to the SMU with the parameter passed. Do not wait
315  * for status/result of the message, thus the "without_waiting".
316  *
317  * Return 0 on success, -errno on error if we weren't able to _send_
318  * the message for some reason. See __smu_cmn_reg2errno() for details
319  * of the -errno.
320  */
321 int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
322 				     uint16_t msg_index,
323 				     uint32_t param)
324 {
325 	struct amdgpu_device *adev = smu->adev;
326 	u32 reg;
327 	int res;
328 
329 	if (adev->no_hw_access)
330 		return 0;
331 
332 	if (smu->smc_fw_state == SMU_FW_HANG) {
333 		dev_err(adev->dev, "SMU is in a hung state, failed to send smu message!\n");
334 		res = -EREMOTEIO;
335 		goto Out;
336 	}
337 
338 	if (smu->smc_fw_state == SMU_FW_INIT) {
339 		smu->smc_fw_state = SMU_FW_RUNTIME;
340 	} else {
341 		reg = __smu_cmn_poll_stat(smu);
342 		res = __smu_cmn_reg2errno(smu, reg);
343 		if (reg == SMU_RESP_NONE || res == -EREMOTEIO)
344 			goto Out;
345 	}
346 
347 	__smu_cmn_send_msg(smu, msg_index, param);
348 	res = 0;
349 Out:
350 	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
351 	    res && (res != -ETIME)) {
352 		amdgpu_device_halt(adev);
353 		WARN_ON(1);
354 	}
355 
356 	return res;
357 }
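
/*
 * Example (illustrative sketch only): callers of this raw interface
 * typically resolve the ASIC-specific message index first and hold
 * smu->message_lock around the send. SMU_MSG_GetSmuVersion is used
 * here purely as a sample message.
 *
 *	int index, ret;
 *
 *	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
 *					       SMU_MSG_GetSmuVersion);
 *	if (index < 0)
 *		return index;
 *
 *	mutex_lock(&smu->message_lock);
 *	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, 0);
 *	mutex_unlock(&smu->message_lock);
 */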
358 
359 /**
360  * smu_cmn_wait_for_response -- wait for response from the SMU
361  * @smu: pointer to an SMU context
362  *
363  * Wait for status from the SMU.
364  *
365  * Return 0 on success, -errno on error, indicating the execution
366  * status and result of the message being waited for. See
367  * __smu_cmn_reg2errno() for details of the -errno.
368  */
369 int smu_cmn_wait_for_response(struct smu_context *smu)
370 {
371 	u32 reg;
372 	int res;
373 
374 	reg = __smu_cmn_poll_stat(smu);
375 	res = __smu_cmn_reg2errno(smu, reg);
376 
377 	if (res == -EREMOTEIO)
378 		smu->smc_fw_state = SMU_FW_HANG;
379 
380 	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
381 	    res && (res != -ETIME)) {
382 		amdgpu_device_halt(smu->adev);
383 		WARN_ON(1);
384 	}
385 
386 	return res;
387 }
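
/*
 * Example (illustrative sketch only): the split send/wait pair lets a
 * caller issue a message and poll for its completion separately, e.g.
 * around a long-running request. "index" and "param" are assumed to
 * have been resolved as in the sketch above.
 *
 *	mutex_lock(&smu->message_lock);
 *	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, param);
 *	if (!ret)
 *		ret = smu_cmn_wait_for_response(smu);
 *	mutex_unlock(&smu->message_lock);
 *
 * -ETIME here means the SMU did not post a response within the polling
 * window, while -EREMOTEIO additionally marks the firmware as hung via
 * smc_fw_state.
 */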
388 
389 /**
390  * smu_cmn_send_smc_msg_with_param -- send a message with parameter
391  * @smu: pointer to an SMU context
392  * @msg: message to send
393  * @param: parameter to send to the SMU
394  * @read_arg: pointer to u32 to return a value from the SMU back
395  *            to the caller
396  *
397  * Send the message @msg with parameter @param to the SMU, wait for
398  * completion of the command, and return back a value from the SMU in
399  * @read_arg pointer.
400  *
401  * Return 0 on success, -errno when a problem is encountered sending
402  * the message or receiving the reply. If there is a PCI bus recovery
403  * or the destination is a virtual GPU which does not allow this
404  * message type, the message is simply dropped and success is returned.
405  * See __smu_cmn_reg2errno() for details of the -errno.
406  *
407  * If we weren't able to send the message to the SMU, we also print
408  * the error to the standard log.
409  *
410  * Command completion status is printed only if the -errno is
411  * -EREMOTEIO, indicating that the SMU returned back an
412  * undefined/unknown/unspecified result. All other cases are
413  * well-defined, not printed, but instead given back to the client to
414  * decide what further to do.
415  *
416  * The return value, @read_arg, is read back regardless, to give the
417  * client more information; on error it would most likely be @param,
418  * but we can't assume that. This also eliminates more
419  * conditionals.
420  */
421 int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
422 				    enum smu_message_type msg,
423 				    uint32_t param,
424 				    uint32_t *read_arg)
425 {
426 	struct amdgpu_device *adev = smu->adev;
427 	int res, index;
428 	bool poll = true;
429 	u32 reg;
430 
431 	if (adev->no_hw_access)
432 		return 0;
433 
434 	index = smu_cmn_to_asic_specific_index(smu,
435 					       CMN2ASIC_MAPPING_MSG,
436 					       msg);
437 	if (index < 0)
438 		return index == -EACCES ? 0 : index;
439 
440 	mutex_lock(&smu->message_lock);
441 
442 	if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
443 		res = __smu_cmn_ras_filter_msg(smu, msg, &poll);
444 		if (res)
445 			goto Out;
446 	}
447 
448 	if (smu->smc_fw_state == SMU_FW_HANG) {
449 		dev_err(adev->dev, "SMU is in a hung state, failed to send smu message!\n");
450 		res = -EREMOTEIO;
451 		goto Out;
452 	} else if (smu->smc_fw_state == SMU_FW_INIT) {
453 		/* Ignore initial smu response register value */
454 		poll = false;
455 		smu->smc_fw_state = SMU_FW_RUNTIME;
456 	}
457 
458 	if (poll) {
459 		reg = __smu_cmn_poll_stat(smu);
460 		res = __smu_cmn_reg2errno(smu, reg);
461 		if (reg == SMU_RESP_NONE || res == -EREMOTEIO) {
462 			__smu_cmn_reg_print_error(smu, reg, index, param, msg);
463 			goto Out;
464 		}
465 	}
466 	__smu_cmn_send_msg(smu, (uint16_t) index, param);
467 	reg = __smu_cmn_poll_stat(smu);
468 	res = __smu_cmn_reg2errno(smu, reg);
469 	if (res != 0) {
470 		if (res == -EREMOTEIO)
471 			smu->smc_fw_state = SMU_FW_HANG;
472 		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
473 	}
474 	if (read_arg) {
475 		smu_cmn_read_arg(smu, read_arg);
476 		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
477 			smu_get_message_name(smu, msg), index, param, reg, *read_arg);
478 	} else {
479 		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
480 			smu_get_message_name(smu, msg), index, param, reg);
481 	}
482 Out:
483 	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
484 		amdgpu_device_halt(adev);
485 		WARN_ON(1);
486 	}
487 
488 	mutex_unlock(&smu->message_lock);
489 	return res;
490 }
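
/*
 * Example (illustrative sketch only): typical send-and-wait usage. The
 * helper performs the index mapping, locking and response polling
 * itself, so a caller only picks a message and reads back the optional
 * argument. SMU_MSG_GetDriverIfVersion is used purely as a sample; the
 * parameterless smu_cmn_send_smc_msg() wrapper just below passes 0.
 *
 *	uint32_t if_version;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDriverIfVersion,
 *					      0, &if_version);
 *	if (ret)
 *		dev_err(smu->adev->dev,
 *			"failed to query the interface version: %d\n", ret);
 */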
491 
492 int smu_cmn_send_smc_msg(struct smu_context *smu,
493 			 enum smu_message_type msg,
494 			 uint32_t *read_arg)
495 {
496 	return smu_cmn_send_smc_msg_with_param(smu,
497 					       msg,
498 					       0,
499 					       read_arg);
500 }
501 
502 int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
503 			 uint32_t msg)
504 {
505 	return __smu_cmn_send_debug_msg(smu, msg, 0);
506 }
507 
508 int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
509 			 uint32_t msg, uint32_t param)
510 {
511 	return __smu_cmn_send_debug_msg(smu, msg, param);
512 }
513 
514 int smu_cmn_to_asic_specific_index(struct smu_context *smu,
515 				   enum smu_cmn2asic_mapping_type type,
516 				   uint32_t index)
517 {
518 	struct cmn2asic_msg_mapping msg_mapping;
519 	struct cmn2asic_mapping mapping;
520 
521 	switch (type) {
522 	case CMN2ASIC_MAPPING_MSG:
523 		if (index >= SMU_MSG_MAX_COUNT ||
524 		    !smu->message_map)
525 			return -EINVAL;
526 
527 		msg_mapping = smu->message_map[index];
528 		if (!msg_mapping.valid_mapping)
529 			return -EINVAL;
530 
531 		if (amdgpu_sriov_vf(smu->adev) &&
532 		    !(msg_mapping.flags & SMU_MSG_VF_FLAG))
533 			return -EACCES;
534 
535 		return msg_mapping.map_to;
536 
537 	case CMN2ASIC_MAPPING_CLK:
538 		if (index >= SMU_CLK_COUNT ||
539 		    !smu->clock_map)
540 			return -EINVAL;
541 
542 		mapping = smu->clock_map[index];
543 		if (!mapping.valid_mapping)
544 			return -EINVAL;
545 
546 		return mapping.map_to;
547 
548 	case CMN2ASIC_MAPPING_FEATURE:
549 		if (index >= SMU_FEATURE_COUNT ||
550 		    !smu->feature_map)
551 			return -EINVAL;
552 
553 		mapping = smu->feature_map[index];
554 		if (!mapping.valid_mapping)
555 			return -EINVAL;
556 
557 		return mapping.map_to;
558 
559 	case CMN2ASIC_MAPPING_TABLE:
560 		if (index >= SMU_TABLE_COUNT ||
561 		    !smu->table_map)
562 			return -EINVAL;
563 
564 		mapping = smu->table_map[index];
565 		if (!mapping.valid_mapping)
566 			return -EINVAL;
567 
568 		return mapping.map_to;
569 
570 	case CMN2ASIC_MAPPING_PWR:
571 		if (index >= SMU_POWER_SOURCE_COUNT ||
572 		    !smu->pwr_src_map)
573 			return -EINVAL;
574 
575 		mapping = smu->pwr_src_map[index];
576 		if (!mapping.valid_mapping)
577 			return -EINVAL;
578 
579 		return mapping.map_to;
580 
581 	case CMN2ASIC_MAPPING_WORKLOAD:
582 		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
583 		    !smu->workload_map)
584 			return -EINVAL;
585 
586 		mapping = smu->workload_map[index];
587 		if (!mapping.valid_mapping)
588 			return -ENOTSUPP;
589 
590 		return mapping.map_to;
591 
592 	default:
593 		return -EINVAL;
594 	}
595 }
596 
597 int smu_cmn_feature_is_supported(struct smu_context *smu,
598 				 enum smu_feature_mask mask)
599 {
600 	struct smu_feature *feature = &smu->smu_feature;
601 	int feature_id;
602 
603 	feature_id = smu_cmn_to_asic_specific_index(smu,
604 						    CMN2ASIC_MAPPING_FEATURE,
605 						    mask);
606 	if (feature_id < 0)
607 		return 0;
608 
609 	WARN_ON(feature_id > feature->feature_num);
610 
611 	return test_bit(feature_id, feature->supported);
612 }
613 
614 static int __smu_get_enabled_features(struct smu_context *smu,
615 			       uint64_t *enabled_features)
616 {
617 	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
618 }
619 
620 int smu_cmn_feature_is_enabled(struct smu_context *smu,
621 			       enum smu_feature_mask mask)
622 {
623 	struct amdgpu_device *adev = smu->adev;
624 	uint64_t enabled_features;
625 	int feature_id;
626 
627 	if (__smu_get_enabled_features(smu, &enabled_features)) {
628 		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
629 		return 0;
630 	}
631 
632 	/*
633 	 * Renoir and Cyan Skillfish are assumed to have all features
634 	 * enabled. Also, since they have no feature_map available, the
635 	 * check here avoids the unwanted feature_map check below.
636 	 */
637 	if (enabled_features == ULLONG_MAX)
638 		return 1;
639 
640 	feature_id = smu_cmn_to_asic_specific_index(smu,
641 						    CMN2ASIC_MAPPING_FEATURE,
642 						    mask);
643 	if (feature_id < 0)
644 		return 0;
645 
646 	return test_bit(feature_id, (unsigned long *)&enabled_features);
647 }
648 
649 bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
650 				enum smu_clk_type clk_type)
651 {
652 	enum smu_feature_mask feature_id = 0;
653 
654 	switch (clk_type) {
655 	case SMU_MCLK:
656 	case SMU_UCLK:
657 		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
658 		break;
659 	case SMU_GFXCLK:
660 	case SMU_SCLK:
661 		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
662 		break;
663 	case SMU_SOCCLK:
664 		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
665 		break;
666 	case SMU_VCLK:
667 	case SMU_VCLK1:
668 		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
669 		break;
670 	case SMU_DCLK:
671 	case SMU_DCLK1:
672 		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
673 		break;
674 	case SMU_FCLK:
675 		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
676 		break;
677 	default:
678 		return true;
679 	}
680 
681 	if (!smu_cmn_feature_is_enabled(smu, feature_id))
682 		return false;
683 
684 	return true;
685 }
686 
687 int smu_cmn_get_enabled_mask(struct smu_context *smu,
688 			     uint64_t *feature_mask)
689 {
690 	uint32_t *feature_mask_high;
691 	uint32_t *feature_mask_low;
692 	int ret = 0, index = 0;
693 
694 	if (!feature_mask)
695 		return -EINVAL;
696 
697 	feature_mask_low = &((uint32_t *)feature_mask)[0];
698 	feature_mask_high = &((uint32_t *)feature_mask)[1];
699 
700 	index = smu_cmn_to_asic_specific_index(smu,
701 						CMN2ASIC_MAPPING_MSG,
702 						SMU_MSG_GetEnabledSmuFeatures);
703 	if (index > 0) {
704 		ret = smu_cmn_send_smc_msg_with_param(smu,
705 						      SMU_MSG_GetEnabledSmuFeatures,
706 						      0,
707 						      feature_mask_low);
708 		if (ret)
709 			return ret;
710 
711 		ret = smu_cmn_send_smc_msg_with_param(smu,
712 						      SMU_MSG_GetEnabledSmuFeatures,
713 						      1,
714 						      feature_mask_high);
715 	} else {
716 		ret = smu_cmn_send_smc_msg(smu,
717 					   SMU_MSG_GetEnabledSmuFeaturesHigh,
718 					   feature_mask_high);
719 		if (ret)
720 			return ret;
721 
722 		ret = smu_cmn_send_smc_msg(smu,
723 					   SMU_MSG_GetEnabledSmuFeaturesLow,
724 					   feature_mask_low);
725 	}
726 
727 	return ret;
728 }
729 
730 uint64_t smu_cmn_get_indep_throttler_status(
731 					const unsigned long dep_status,
732 					const uint8_t *throttler_map)
733 {
734 	uint64_t indep_status = 0;
735 	uint8_t dep_bit = 0;
736 
737 	for_each_set_bit(dep_bit, &dep_status, 32)
738 		indep_status |= 1ULL << throttler_map[dep_bit];
739 
740 	return indep_status;
741 }
742 
743 int smu_cmn_feature_update_enable_state(struct smu_context *smu,
744 					uint64_t feature_mask,
745 					bool enabled)
746 {
747 	int ret = 0;
748 
749 	if (enabled) {
750 		ret = smu_cmn_send_smc_msg_with_param(smu,
751 						  SMU_MSG_EnableSmuFeaturesLow,
752 						  lower_32_bits(feature_mask),
753 						  NULL);
754 		if (ret)
755 			return ret;
756 		ret = smu_cmn_send_smc_msg_with_param(smu,
757 						  SMU_MSG_EnableSmuFeaturesHigh,
758 						  upper_32_bits(feature_mask),
759 						  NULL);
760 	} else {
761 		ret = smu_cmn_send_smc_msg_with_param(smu,
762 						  SMU_MSG_DisableSmuFeaturesLow,
763 						  lower_32_bits(feature_mask),
764 						  NULL);
765 		if (ret)
766 			return ret;
767 		ret = smu_cmn_send_smc_msg_with_param(smu,
768 						  SMU_MSG_DisableSmuFeaturesHigh,
769 						  upper_32_bits(feature_mask),
770 						  NULL);
771 	}
772 
773 	return ret;
774 }
775 
776 int smu_cmn_feature_set_enabled(struct smu_context *smu,
777 				enum smu_feature_mask mask,
778 				bool enable)
779 {
780 	int feature_id;
781 
782 	feature_id = smu_cmn_to_asic_specific_index(smu,
783 						    CMN2ASIC_MAPPING_FEATURE,
784 						    mask);
785 	if (feature_id < 0)
786 		return -EINVAL;
787 
788 	return smu_cmn_feature_update_enable_state(smu,
789 					       1ULL << feature_id,
790 					       enable);
791 }
792 
793 #undef __SMU_DUMMY_MAP
794 #define __SMU_DUMMY_MAP(fea)	#fea
795 static const char *__smu_feature_names[] = {
796 	SMU_FEATURE_MASKS
797 };
798 
799 static const char *smu_get_feature_name(struct smu_context *smu,
800 					enum smu_feature_mask feature)
801 {
802 	if (feature >= SMU_FEATURE_COUNT)
803 		return "unknown smu feature";
804 	return __smu_feature_names[feature];
805 }
806 
807 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
808 				   char *buf)
809 {
810 	int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
811 	uint64_t feature_mask;
812 	int i, feature_index;
813 	uint32_t count = 0;
814 	size_t size = 0;
815 
816 	if (__smu_get_enabled_features(smu, &feature_mask))
817 		return 0;
818 
819 	size =  sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
820 			upper_32_bits(feature_mask), lower_32_bits(feature_mask));
821 
822 	memset(sort_feature, -1, sizeof(sort_feature));
823 
824 	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
825 		feature_index = smu_cmn_to_asic_specific_index(smu,
826 							       CMN2ASIC_MAPPING_FEATURE,
827 							       i);
828 		if (feature_index < 0)
829 			continue;
830 
831 		sort_feature[feature_index] = i;
832 	}
833 
834 	size += sysfs_emit_at(buf, size, "%-2s. %-20s  %-3s : %-s\n",
835 			"No", "Feature", "Bit", "State");
836 
837 	for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
838 		if (sort_feature[feature_index] < 0)
839 			continue;
840 
841 		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
842 				count++,
843 				smu_get_feature_name(smu, sort_feature[feature_index]),
844 				feature_index,
845 				!!test_bit(feature_index, (unsigned long *)&feature_mask) ?
846 				"enabled" : "disabled");
847 	}
848 
849 	return size;
850 }
851 
852 int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
853 				uint64_t new_mask)
854 {
855 	int ret = 0;
856 	uint64_t feature_mask;
857 	uint64_t feature_2_enabled = 0;
858 	uint64_t feature_2_disabled = 0;
859 
860 	ret = __smu_get_enabled_features(smu, &feature_mask);
861 	if (ret)
862 		return ret;
863 
864 	feature_2_enabled  = ~feature_mask & new_mask;
865 	feature_2_disabled = feature_mask & ~new_mask;
866 
867 	if (feature_2_enabled) {
868 		ret = smu_cmn_feature_update_enable_state(smu,
869 							  feature_2_enabled,
870 							  true);
871 		if (ret)
872 			return ret;
873 	}
874 	if (feature_2_disabled) {
875 		ret = smu_cmn_feature_update_enable_state(smu,
876 							  feature_2_disabled,
877 							  false);
878 		if (ret)
879 			return ret;
880 	}
881 
882 	return ret;
883 }
884 
885 /**
886  * smu_cmn_disable_all_features_with_exception - disable all dpm features
887  *                                               except the one specified by
888  *                                               @mask
889  *
890  * @smu:               smu_context pointer
891  * @mask:              the dpm feature which should not be disabled
892  *                     SMU_FEATURE_COUNT: no exception, disable all dpm
893  *                     features
894  *
895  * Returns:
896  * 0 on success or a negative error code on failure.
897  */
898 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
899 						enum smu_feature_mask mask)
900 {
901 	uint64_t features_to_disable = U64_MAX;
902 	int skipped_feature_id;
903 
904 	if (mask != SMU_FEATURE_COUNT) {
905 		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
906 								    CMN2ASIC_MAPPING_FEATURE,
907 								    mask);
908 		if (skipped_feature_id < 0)
909 			return -EINVAL;
910 
911 		features_to_disable &= ~(1ULL << skipped_feature_id);
912 	}
913 
914 	return smu_cmn_feature_update_enable_state(smu,
915 						   features_to_disable,
916 						   0);
917 }
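
/*
 * Example (illustrative sketch only): disabling every dpm feature
 * except, say, UCLK DPM would look like the call below; passing
 * SMU_FEATURE_COUNT instead disables all features with no exception.
 * The feature picked here is only a sample.
 *
 *	ret = smu_cmn_disable_all_features_with_exception(smu,
 *						SMU_FEATURE_DPM_UCLK_BIT);
 */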
918 
919 int smu_cmn_get_smc_version(struct smu_context *smu,
920 			    uint32_t *if_version,
921 			    uint32_t *smu_version)
922 {
923 	int ret = 0;
924 
925 	if (!if_version && !smu_version)
926 		return -EINVAL;
927 
928 	if (smu->smc_fw_if_version && smu->smc_fw_version)
929 	{
930 		if (if_version)
931 			*if_version = smu->smc_fw_if_version;
932 
933 		if (smu_version)
934 			*smu_version = smu->smc_fw_version;
935 
936 		return 0;
937 	}
938 
939 	if (if_version) {
940 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
941 		if (ret)
942 			return ret;
943 
944 		smu->smc_fw_if_version = *if_version;
945 	}
946 
947 	if (smu_version) {
948 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
949 		if (ret)
950 			return ret;
951 
952 		smu->smc_fw_version = *smu_version;
953 	}
954 
955 	return ret;
956 }
957 
958 int smu_cmn_update_table(struct smu_context *smu,
959 			 enum smu_table_id table_index,
960 			 int argument,
961 			 void *table_data,
962 			 bool drv2smu)
963 {
964 	struct smu_table_context *smu_table = &smu->smu_table;
965 	struct amdgpu_device *adev = smu->adev;
966 	struct smu_table *table = &smu_table->driver_table;
967 	int table_id = smu_cmn_to_asic_specific_index(smu,
968 						      CMN2ASIC_MAPPING_TABLE,
969 						      table_index);
970 	uint32_t table_size;
971 	int ret = 0;
972 	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
973 		return -EINVAL;
974 
975 	table_size = smu_table->tables[table_index].size;
976 
977 	if (drv2smu) {
978 		memcpy(table->cpu_addr, table_data, table_size);
979 		/*
980 		 * Flush the HDP cache to guarantee that the content seen by
981 		 * the GPU is consistent with what the CPU wrote.
982 		 */
983 		amdgpu_asic_flush_hdp(adev, NULL);
984 	}
985 
986 	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
987 					  SMU_MSG_TransferTableDram2Smu :
988 					  SMU_MSG_TransferTableSmu2Dram,
989 					  table_id | ((argument & 0xFFFF) << 16),
990 					  NULL);
991 	if (ret)
992 		return ret;
993 
994 	if (!drv2smu) {
995 		amdgpu_asic_invalidate_hdp(adev, NULL);
996 		memcpy(table_data, table->cpu_addr, table_size);
997 	}
998 
999 	return 0;
1000 }
1001 
1002 int smu_cmn_write_watermarks_table(struct smu_context *smu)
1003 {
1004 	void *watermarks_table = smu->smu_table.watermarks_table;
1005 
1006 	if (!watermarks_table)
1007 		return -EINVAL;
1008 
1009 	return smu_cmn_update_table(smu,
1010 				    SMU_TABLE_WATERMARKS,
1011 				    0,
1012 				    watermarks_table,
1013 				    true);
1014 }
1015 
1016 int smu_cmn_write_pptable(struct smu_context *smu)
1017 {
1018 	void *pptable = smu->smu_table.driver_pptable;
1019 
1020 	return smu_cmn_update_table(smu,
1021 				    SMU_TABLE_PPTABLE,
1022 				    0,
1023 				    pptable,
1024 				    true);
1025 }
1026 
1027 int smu_cmn_get_metrics_table(struct smu_context *smu,
1028 			      void *metrics_table,
1029 			      bool bypass_cache)
1030 {
1031 	struct smu_table_context *smu_table = &smu->smu_table;
1032 	uint32_t table_size =
1033 		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
1034 	int ret = 0;
1035 
1036 	if (bypass_cache ||
1037 	    !smu_table->metrics_time ||
1038 	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
1039 		ret = smu_cmn_update_table(smu,
1040 				       SMU_TABLE_SMU_METRICS,
1041 				       0,
1042 				       smu_table->metrics_table,
1043 				       false);
1044 		if (ret) {
1045 			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
1046 			return ret;
1047 		}
1048 		smu_table->metrics_time = jiffies;
1049 	}
1050 
1051 	if (metrics_table)
1052 		memcpy(metrics_table, smu_table->metrics_table, table_size);
1053 
1054 	return 0;
1055 }
1056 
1057 int smu_cmn_get_combo_pptable(struct smu_context *smu)
1058 {
1059 	void *pptable = smu->smu_table.combo_pptable;
1060 
1061 	return smu_cmn_update_table(smu,
1062 				    SMU_TABLE_COMBO_PPTABLE,
1063 				    0,
1064 				    pptable,
1065 				    false);
1066 }
1067 
1068 int smu_cmn_set_mp1_state(struct smu_context *smu,
1069 			  enum pp_mp1_state mp1_state)
1070 {
1071 	enum smu_message_type msg;
1072 	int ret;
1073 
1074 	switch (mp1_state) {
1075 	case PP_MP1_STATE_SHUTDOWN:
1076 		msg = SMU_MSG_PrepareMp1ForShutdown;
1077 		break;
1078 	case PP_MP1_STATE_UNLOAD:
1079 		msg = SMU_MSG_PrepareMp1ForUnload;
1080 		break;
1081 	case PP_MP1_STATE_RESET:
1082 		msg = SMU_MSG_PrepareMp1ForReset;
1083 		break;
1084 	case PP_MP1_STATE_NONE:
1085 	default:
1086 		return 0;
1087 	}
1088 
1089 	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
1090 	if (ret)
1091 		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
1092 
1093 	return ret;
1094 }
1095 
1096 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
1097 {
1098 	struct pci_dev *p = NULL;
1099 	bool snd_driver_loaded;
1100 
1101 	/*
1102 	 * If the ASIC comes with no audio function, we always assume
1103 	 * it is "enabled".
1104 	 */
1105 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
1106 			adev->pdev->bus->number, 1);
1107 	if (!p)
1108 		return true;
1109 
1110 	snd_driver_loaded = pci_is_enabled(p);
1111 
1112 	pci_dev_put(p);
1113 
1114 	return snd_driver_loaded;
1115 }
1116 
1117 static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
1118 {
1119 	if (level < 0 || !(policy->level_mask & BIT(level)))
1120 		return "Invalid";
1121 
1122 	switch (level) {
1123 	case SOC_PSTATE_DEFAULT:
1124 		return "soc_pstate_default";
1125 	case SOC_PSTATE_0:
1126 		return "soc_pstate_0";
1127 	case SOC_PSTATE_1:
1128 		return "soc_pstate_1";
1129 	case SOC_PSTATE_2:
1130 		return "soc_pstate_2";
1131 	}
1132 
1133 	return "Invalid";
1134 }
1135 
1136 static struct smu_dpm_policy_desc pstate_policy_desc = {
1137 	.name = STR_SOC_PSTATE_POLICY,
1138 	.get_desc = smu_soc_policy_get_desc,
1139 };
1140 
1141 void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
1142 {
1143 	policy->desc = &pstate_policy_desc;
1144 }
1145 
1146 static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
1147 					   int level)
1148 {
1149 	if (level < 0 || !(policy->level_mask & BIT(level)))
1150 		return "Invalid";
1151 
1152 	switch (level) {
1153 	case XGMI_PLPD_DISALLOW:
1154 		return "plpd_disallow";
1155 	case XGMI_PLPD_DEFAULT:
1156 		return "plpd_default";
1157 	case XGMI_PLPD_OPTIMIZED:
1158 		return "plpd_optimized";
1159 	}
1160 
1161 	return "Invalid";
1162 }
1163 
1164 static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
1165 	.name = STR_XGMI_PLPD_POLICY,
1166 	.get_desc = smu_xgmi_plpd_policy_get_desc,
1167 };
1168 
1169 void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
1170 {
1171 	policy->desc = &xgmi_plpd_policy_desc;
1172 }
1173 
1174 void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
1175 				       u32 workload_mask,
1176 				       u32 *backend_workload_mask)
1177 {
1178 	int workload_type;
1179 	u32 profile_mode;
1180 
1181 	*backend_workload_mask = 0;
1182 
1183 	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
1184 		if (!(workload_mask & (1 << profile_mode)))
1185 			continue;
1186 
1187 		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1188 		workload_type = smu_cmn_to_asic_specific_index(smu,
1189 							       CMN2ASIC_MAPPING_WORKLOAD,
1190 							       profile_mode);
1191 
1192 		if (workload_type < 0)
1193 			continue;
1194 
1195 		*backend_workload_mask |= 1 << workload_type;
1196 	}
1197 }
1198