xref: /linux/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c (revision e9ef810dfee7a2227da9d423aecb0ced35faddbe)
1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #define SWSMU_CODE_LAYER_L4
24 
25 #include "amdgpu.h"
26 #include "amdgpu_smu.h"
27 #include "smu_cmn.h"
28 #include "soc15_common.h"
29 
30 /*
31  * DO NOT use these for err/warn/info/debug messages.
32  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
33  * They are more MGPU friendly.
34  */
35 #undef pr_err
36 #undef pr_warn
37 #undef pr_info
38 #undef pr_debug
39 
40 #define MP1_C2PMSG_90__CONTENT_MASK                                                                    0xFFFFFFFFL
41 
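/* PCIe link speeds, apparently in units of 0.1 GT/s
 * (2.5, 5.0, 8.0, 16.0, 32.0 and 64.0 GT/s).
 */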
42 const int link_speed[] = {25, 50, 80, 160, 320, 640};
43 
44 #undef __SMU_DUMMY_MAP
45 #define __SMU_DUMMY_MAP(type)	#type
46 static const char * const __smu_message_names[] = {
47 	SMU_MESSAGE_TYPES
48 };
49 
50 #define smu_cmn_call_asic_func(intf, smu, args...)                             \
51 	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?                          \
52 				     (smu)->ppt_funcs->intf(smu, ##args) :     \
53 				     -ENOTSUPP) :                              \
54 			    -EINVAL)
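
/*
 * A minimal usage sketch for the dispatch macro above, kept as a comment:
 * a hypothetical wrapper around the real get_enabled_mask ppt_funcs
 * callback (used further below by __smu_get_enabled_features()).
 *
 *	static int example_get_enabled_mask(struct smu_context *smu,
 *					    uint64_t *mask)
 *	{
 *		// -EINVAL if ppt_funcs is NULL, -ENOTSUPP if the callback is
 *		// not implemented, otherwise the callback's own return value.
 *		return smu_cmn_call_asic_func(get_enabled_mask, smu, mask);
 *	}
 */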
55 
56 static const char *smu_get_message_name(struct smu_context *smu,
57 					enum smu_message_type type)
58 {
59 	if (type >= SMU_MSG_MAX_COUNT)
60 		return "unknown smu message";
61 
62 	return __smu_message_names[type];
63 }
64 
65 static void smu_cmn_read_arg(struct smu_context *smu,
66 			     uint32_t *arg)
67 {
68 	struct amdgpu_device *adev = smu->adev;
69 
70 	*arg = RREG32(smu->param_reg);
71 }
72 
73 /* Redefine the SMU error codes here.
74  *
75  * Note that these definitions are redundant and should be removed
76  * once the SMU exports a unified header file containing these
77  * macros, which we can then simply include and use. At the moment,
78  * these error codes are unfortunately defined per ASIC by the SMU,
79  * yet this is a single driver for all ASICs.
80  */
81 #define SMU_RESP_NONE           0
82 #define SMU_RESP_OK             1
83 #define SMU_RESP_CMD_FAIL       0xFF
84 #define SMU_RESP_CMD_UNKNOWN    0xFE
85 #define SMU_RESP_CMD_BAD_PREREQ 0xFD
86 #define SMU_RESP_BUSY_OTHER     0xFC
87 #define SMU_RESP_DEBUG_END      0xFB
88 
89 #define SMU_RESP_UNEXP (~0U)
90 /**
91  * __smu_cmn_poll_stat -- poll for a status from the SMU
92  * @smu: a pointer to SMU context
93  *
94  * Returns the status of the SMU, which could be,
95  *    0, the SMU is busy with your command;
96  *    1, execution status: success, execution result: success;
97  * 0xFF, execution status: success, execution result: failure;
98  * 0xFE, unknown command;
99  * 0xFD, valid command, but bad (command) prerequisites;
100  * 0xFC, the command was rejected as the SMU is busy;
101  * 0xFB, "SMC_Result_DebugDataDumpEnd".
102  *
103  * The values here are not defined by macros, because I'd rather we
104  * include a single header file which defines them, which is
105  * maintained by the SMU FW team, so that we're impervious to firmware
106  * changes. At the moment those values are defined in various header
107  * files, one for each ASIC, yet this is a single ASIC-agnostic
108  * interface. Such a change can be followed up in a subsequent patch.
109  */
110 static u32 __smu_cmn_poll_stat(struct smu_context *smu)
111 {
112 	struct amdgpu_device *adev = smu->adev;
113 	int timeout = adev->usec_timeout * 20;
114 	u32 reg;
115 
116 	for ( ; timeout > 0; timeout--) {
117 		reg = RREG32(smu->resp_reg);
118 		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
119 			break;
120 
121 		udelay(1);
122 	}
123 
124 	return reg;
125 }
126 
127 static void __smu_cmn_reg_print_error(struct smu_context *smu,
128 				      u32 reg_c2pmsg_90,
129 				      int msg_index,
130 				      u32 param,
131 				      enum smu_message_type msg)
132 {
133 	struct amdgpu_device *adev = smu->adev;
134 	const char *message = smu_get_message_name(smu, msg);
135 	u32 msg_idx, prm;
136 
137 	switch (reg_c2pmsg_90) {
138 	case SMU_RESP_NONE: {
139 		msg_idx = RREG32(smu->msg_reg);
140 		prm     = RREG32(smu->param_reg);
141 		dev_err_ratelimited(adev->dev,
142 				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
143 				    msg_idx, prm);
144 		}
145 		break;
146 	case SMU_RESP_OK:
147 		/* The SMU executed the command. It completed with a
148 		 * successful result.
149 		 */
150 		break;
151 	case SMU_RESP_CMD_FAIL:
152 		/* The SMU executed the command. It completed with an
153 		 * unsuccessful result.
154 		 */
155 		break;
156 	case SMU_RESP_CMD_UNKNOWN:
157 		dev_err_ratelimited(adev->dev,
158 				    "SMU: unknown command: index:%d param:0x%08X message:%s",
159 				    msg_index, param, message);
160 		break;
161 	case SMU_RESP_CMD_BAD_PREREQ:
162 		dev_err_ratelimited(adev->dev,
163 				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
164 				    msg_index, param, message);
165 		break;
166 	case SMU_RESP_BUSY_OTHER:
167 		dev_err_ratelimited(adev->dev,
168 				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
169 				    msg_index, param, message);
170 		break;
171 	case SMU_RESP_DEBUG_END:
172 		dev_err_ratelimited(adev->dev,
173 				    "SMU: I'm debugging!");
174 		break;
175 	case SMU_RESP_UNEXP:
176 		if (amdgpu_device_bus_status_check(smu->adev)) {
177 			/* print error immediately if device is off the bus */
178 			dev_err(adev->dev,
179 				"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
180 				reg_c2pmsg_90, msg_index, param, message);
181 			break;
182 		}
183 		fallthrough;
184 	default:
185 		dev_err_ratelimited(adev->dev,
186 				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
187 				    reg_c2pmsg_90, msg_index, param, message);
188 		break;
189 	}
190 }
191 
192 static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
193 {
194 	int res;
195 
196 	switch (reg_c2pmsg_90) {
197 	case SMU_RESP_NONE:
198 		/* The SMU is busy--still executing your command.
199 		 */
200 		res = -ETIME;
201 		break;
202 	case SMU_RESP_OK:
203 		res = 0;
204 		break;
205 	case SMU_RESP_CMD_FAIL:
206 		/* The SMU executed the command, but the execution
207 		 * result was failure.
208 		 */
209 		res = -EIO;
210 		break;
211 	case SMU_RESP_CMD_UNKNOWN:
212 		/* Unknown command--ignored by the SMU.
213 		 */
214 		res = -EOPNOTSUPP;
215 		break;
216 	case SMU_RESP_CMD_BAD_PREREQ:
217 		/* Valid command--bad prerequisites.
218 		 */
219 		res = -EINVAL;
220 		break;
221 	case SMU_RESP_BUSY_OTHER:
222 		/* The SMU is busy with other commands. The client
223 		 * should retry in 10 us.
224 		 */
225 		res = -EBUSY;
226 		break;
227 	default:
228 		/* Unknown or debug response from the SMU.
229 		 */
230 		res = -EREMOTEIO;
231 		break;
232 	}
233 
234 	return res;
235 }
236 
237 static void __smu_cmn_send_msg(struct smu_context *smu,
238 			       u16 msg,
239 			       u32 param)
240 {
241 	struct amdgpu_device *adev = smu->adev;
242 
243 	WREG32(smu->resp_reg, 0);
244 	WREG32(smu->param_reg, param);
245 	WREG32(smu->msg_reg, msg);
246 }
247 
248 static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu,
249 					       enum smu_message_type msg)
250 {
251 	return smu->message_map[msg].flags;
252 }
253 
254 static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
255 				    enum smu_message_type msg, bool *poll)
256 {
257 	struct amdgpu_device *adev = smu->adev;
258 	uint32_t flags, resp;
259 	bool fed_status;
260 
261 	flags = __smu_cmn_get_msg_flags(smu, msg);
262 	*poll = true;
263 
264 	/* When there is a RAS fatal error, the FW won't process non-RAS priority
265 	 * messages. Don't allow any messages other than RAS priority messages.
266 	 */
267 	fed_status = amdgpu_ras_get_fed_status(adev);
268 	if (fed_status) {
269 		if (!(flags & SMU_MSG_RAS_PRI)) {
270 			dev_dbg(adev->dev,
271 				"RAS error detected, skip sending %s",
272 				smu_get_message_name(smu, msg));
273 			return -EACCES;
274 		}
275 
276 		/* The FW ignores non-priority messages when a RAS fatal error
277 		 * is detected, so a previous message may not have received a
278 		 * response. Allow priority messages to continue without
279 		 * polling for the response status.
280 		 */
281 		resp = RREG32(smu->resp_reg);
282 		dev_dbg(adev->dev,
283 			"Sending RAS priority message %s response status: %x",
284 			smu_get_message_name(smu, msg), resp);
285 		if (resp == 0)
286 			*poll = false;
287 	}
288 
289 	return 0;
290 }
291 
292 static int __smu_cmn_send_debug_msg(struct smu_context *smu,
293 			       u32 msg,
294 			       u32 param)
295 {
296 	struct amdgpu_device *adev = smu->adev;
297 
298 	WREG32(smu->debug_param_reg, param);
299 	WREG32(smu->debug_msg_reg, msg);
300 	WREG32(smu->debug_resp_reg, 0);
301 
302 	return 0;
303 }
304 /**
305  * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
306  * @smu: pointer to an SMU context
307  * @msg_index: message index
308  * @param: message parameter to send to the SMU
309  *
310  * Send a message to the SMU with the parameter passed. Do not wait
311  * for status/result of the message, thus the "without_waiting".
312  *
313  * Return 0 on success, -errno on error if we weren't able to _send_
314  * the message for some reason. See __smu_cmn_reg2errno() for details
315  * of the -errno.
316  */
317 int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
318 				     uint16_t msg_index,
319 				     uint32_t param)
320 {
321 	struct amdgpu_device *adev = smu->adev;
322 	u32 reg;
323 	int res;
324 
325 	if (adev->no_hw_access)
326 		return 0;
327 
328 	if (smu->smc_fw_state == SMU_FW_HANG) {
329 		dev_err(adev->dev, "SMU is in hanged state, failed to send smu message!\n");
330 		res = -EREMOTEIO;
331 		goto Out;
332 	}
333 
334 	if (smu->smc_fw_state == SMU_FW_INIT) {
335 		smu->smc_fw_state = SMU_FW_RUNTIME;
336 	} else {
337 		reg = __smu_cmn_poll_stat(smu);
338 		res = __smu_cmn_reg2errno(smu, reg);
339 		if (reg == SMU_RESP_NONE || res == -EREMOTEIO)
340 			goto Out;
341 	}
342 
343 	__smu_cmn_send_msg(smu, msg_index, param);
344 	res = 0;
345 Out:
346 	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
347 	    res && (res != -ETIME)) {
348 		amdgpu_device_halt(adev);
349 		WARN_ON(1);
350 	}
351 
352 	return res;
353 }
354 
355 /**
356  * smu_cmn_wait_for_response -- wait for response from the SMU
357  * @smu: pointer to an SMU context
358  *
359  * Wait for status from the SMU.
360  *
361  * Return 0 on success, -errno on error, indicating the execution
362  * status and result of the message being waited for. See
363  * __smu_cmn_reg2errno() for details of the -errno.
364  */
365 int smu_cmn_wait_for_response(struct smu_context *smu)
366 {
367 	u32 reg;
368 	int res;
369 
370 	reg = __smu_cmn_poll_stat(smu);
371 	res = __smu_cmn_reg2errno(smu, reg);
372 
373 	if (res == -EREMOTEIO)
374 		smu->smc_fw_state = SMU_FW_HANG;
375 
376 	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
377 	    res && (res != -ETIME)) {
378 		amdgpu_device_halt(smu->adev);
379 		WARN_ON(1);
380 	}
381 
382 	return res;
383 }
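
/*
 * A minimal sketch (hypothetical caller, names illustrative) of how the two
 * helpers above are typically paired: fire the message, then poll for the
 * result. Locking of smu->message_lock is assumed to be the caller's
 * responsibility and is omitted here.
 *
 *	static int example_fire_and_wait(struct smu_context *smu,
 *					 uint16_t msg_index, uint32_t param)
 *	{
 *		int ret;
 *
 *		ret = smu_cmn_send_msg_without_waiting(smu, msg_index, param);
 *		if (ret)
 *			return ret;
 *
 *		// Maps the response register value to 0 or a -errno.
 *		return smu_cmn_wait_for_response(smu);
 *	}
 */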
384 
385 /**
386  * smu_cmn_send_smc_msg_with_param -- send a message with parameter
387  * @smu: pointer to an SMU context
388  * @msg: message to send
389  * @param: parameter to send to the SMU
390  * @read_arg: pointer to u32 to return a value from the SMU back
391  *            to the caller
392  *
393  * Send the message @msg with parameter @param to the SMU, wait for
394  * completion of the command, and return a value from the SMU back to
395  * the caller via the @read_arg pointer.
396  *
397  * Return 0 on success, -errno when a problem is encountered sending
398  * the message or receiving the reply. If there is a PCI bus recovery or
399  * the destination is a virtual GPU which does not allow this message
400  * type, the message is simply dropped and success is also returned.
401  * See __smu_cmn_reg2errno() for details of the -errno.
402  *
403  * If we weren't able to send the message to the SMU, we also print
404  * the error to the standard log.
405  *
406  * Command completion status is printed only if the -errno is
407  * -EREMOTEIO, indicating that the SMU returned back an
408  * undefined/unknown/unspecified result. All other cases are
409  * well-defined, not printed, but instead given back to the client to
410  * decide what further to do.
411  *
412  * The @read_arg value is read back regardless, to give the client more
413  * information, which on error would most likely be @param, but we
414  * can't assume that. This also eliminates some additional
415  * conditionals.
416  */
417 int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
418 				    enum smu_message_type msg,
419 				    uint32_t param,
420 				    uint32_t *read_arg)
421 {
422 	struct amdgpu_device *adev = smu->adev;
423 	int res, index;
424 	bool poll = true;
425 	u32 reg;
426 
427 	if (adev->no_hw_access)
428 		return 0;
429 
430 	index = smu_cmn_to_asic_specific_index(smu,
431 					       CMN2ASIC_MAPPING_MSG,
432 					       msg);
433 	if (index < 0)
434 		return index == -EACCES ? 0 : index;
435 
436 	mutex_lock(&smu->message_lock);
437 
438 	if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
439 		res = __smu_cmn_ras_filter_msg(smu, msg, &poll);
440 		if (res)
441 			goto Out;
442 	}
443 
444 	if (smu->smc_fw_state == SMU_FW_HANG) {
445 		dev_err(adev->dev, "SMU is in hanged state, failed to send smu message!\n");
446 		res = -EREMOTEIO;
447 		goto Out;
448 	} else if (smu->smc_fw_state == SMU_FW_INIT) {
449 		/* Ignore initial smu response register value */
450 		poll = false;
451 		smu->smc_fw_state = SMU_FW_RUNTIME;
452 	}
453 
454 	if (poll) {
455 		reg = __smu_cmn_poll_stat(smu);
456 		res = __smu_cmn_reg2errno(smu, reg);
457 		if (reg == SMU_RESP_NONE || res == -EREMOTEIO) {
458 			__smu_cmn_reg_print_error(smu, reg, index, param, msg);
459 			goto Out;
460 		}
461 	}
462 	__smu_cmn_send_msg(smu, (uint16_t) index, param);
463 	reg = __smu_cmn_poll_stat(smu);
464 	res = __smu_cmn_reg2errno(smu, reg);
465 	if (res != 0) {
466 		if (res == -EREMOTEIO)
467 			smu->smc_fw_state = SMU_FW_HANG;
468 		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
469 	}
470 	if (read_arg) {
471 		smu_cmn_read_arg(smu, read_arg);
472 		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
473 			smu_get_message_name(smu, msg), index, param, reg, *read_arg);
474 	} else {
475 		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
476 			smu_get_message_name(smu, msg), index, param, reg);
477 	}
478 Out:
479 	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
480 		amdgpu_device_halt(adev);
481 		WARN_ON(1);
482 	}
483 
484 	mutex_unlock(&smu->message_lock);
485 	return res;
486 }
487 
488 int smu_cmn_send_smc_msg(struct smu_context *smu,
489 			 enum smu_message_type msg,
490 			 uint32_t *read_arg)
491 {
492 	return smu_cmn_send_smc_msg_with_param(smu,
493 					       msg,
494 					       0,
495 					       read_arg);
496 }
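
/*
 * A minimal sketch of hypothetical callers of the two helpers above; the
 * messages named are real entries of enum smu_message_type, but the wrapper
 * functions are illustrative only.
 *
 *	static int example_query_fw_version(struct smu_context *smu,
 *					    uint32_t *out)
 *	{
 *		// No parameter needed, read one value back from the SMU.
 *		return smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, out);
 *	}
 *
 *	static int example_set_workload_mask(struct smu_context *smu,
 *					     uint32_t mask)
 *	{
 *		// Parameter sent, no value read back.
 *		return smu_cmn_send_smc_msg_with_param(smu,
 *						       SMU_MSG_SetWorkloadMask,
 *						       mask, NULL);
 *	}
 */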
497 
498 int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
499 			 uint32_t msg)
500 {
501 	return __smu_cmn_send_debug_msg(smu, msg, 0);
502 }
503 
504 int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
505 			 uint32_t msg, uint32_t param)
506 {
507 	return __smu_cmn_send_debug_msg(smu, msg, param);
508 }
509 
510 int smu_cmn_to_asic_specific_index(struct smu_context *smu,
511 				   enum smu_cmn2asic_mapping_type type,
512 				   uint32_t index)
513 {
514 	struct cmn2asic_msg_mapping msg_mapping;
515 	struct cmn2asic_mapping mapping;
516 
517 	switch (type) {
518 	case CMN2ASIC_MAPPING_MSG:
519 		if (index >= SMU_MSG_MAX_COUNT ||
520 		    !smu->message_map)
521 			return -EINVAL;
522 
523 		msg_mapping = smu->message_map[index];
524 		if (!msg_mapping.valid_mapping)
525 			return -EINVAL;
526 
527 		if (amdgpu_sriov_vf(smu->adev) &&
528 		    !(msg_mapping.flags & SMU_MSG_VF_FLAG))
529 			return -EACCES;
530 
531 		return msg_mapping.map_to;
532 
533 	case CMN2ASIC_MAPPING_CLK:
534 		if (index >= SMU_CLK_COUNT ||
535 		    !smu->clock_map)
536 			return -EINVAL;
537 
538 		mapping = smu->clock_map[index];
539 		if (!mapping.valid_mapping)
540 			return -EINVAL;
541 
542 		return mapping.map_to;
543 
544 	case CMN2ASIC_MAPPING_FEATURE:
545 		if (index >= SMU_FEATURE_COUNT ||
546 		    !smu->feature_map)
547 			return -EINVAL;
548 
549 		mapping = smu->feature_map[index];
550 		if (!mapping.valid_mapping)
551 			return -EINVAL;
552 
553 		return mapping.map_to;
554 
555 	case CMN2ASIC_MAPPING_TABLE:
556 		if (index >= SMU_TABLE_COUNT ||
557 		    !smu->table_map)
558 			return -EINVAL;
559 
560 		mapping = smu->table_map[index];
561 		if (!mapping.valid_mapping)
562 			return -EINVAL;
563 
564 		return mapping.map_to;
565 
566 	case CMN2ASIC_MAPPING_PWR:
567 		if (index >= SMU_POWER_SOURCE_COUNT ||
568 		    !smu->pwr_src_map)
569 			return -EINVAL;
570 
571 		mapping = smu->pwr_src_map[index];
572 		if (!mapping.valid_mapping)
573 			return -EINVAL;
574 
575 		return mapping.map_to;
576 
577 	case CMN2ASIC_MAPPING_WORKLOAD:
578 		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
579 		    !smu->workload_map)
580 			return -EINVAL;
581 
582 		mapping = smu->workload_map[index];
583 		if (!mapping.valid_mapping)
584 			return -ENOTSUPP;
585 
586 		return mapping.map_to;
587 
588 	default:
589 		return -EINVAL;
590 	}
591 }
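
/*
 * A minimal sketch (hypothetical, illustrative names) of translating a
 * common message enum into its ASIC-specific index before sending it with
 * the "raw" smu_cmn_send_msg_without_waiting() interface.
 *
 *	static int example_send_by_index(struct smu_context *smu, uint32_t param)
 *	{
 *		int index = smu_cmn_to_asic_specific_index(smu,
 *							   CMN2ASIC_MAPPING_MSG,
 *							   SMU_MSG_GetSmuVersion);
 *		if (index < 0)
 *			return index;
 *
 *		return smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
 *							param);
 *	}
 */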
592 
593 int smu_cmn_feature_is_supported(struct smu_context *smu,
594 				 enum smu_feature_mask mask)
595 {
596 	struct smu_feature *feature = &smu->smu_feature;
597 	int feature_id;
598 
599 	feature_id = smu_cmn_to_asic_specific_index(smu,
600 						    CMN2ASIC_MAPPING_FEATURE,
601 						    mask);
602 	if (feature_id < 0)
603 		return 0;
604 
605 	WARN_ON(feature_id > feature->feature_num);
606 
607 	return test_bit(feature_id, feature->supported);
608 }
609 
610 static int __smu_get_enabled_features(struct smu_context *smu,
611 			       uint64_t *enabled_features)
612 {
613 	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
614 }
615 
616 int smu_cmn_feature_is_enabled(struct smu_context *smu,
617 			       enum smu_feature_mask mask)
618 {
619 	struct amdgpu_device *adev = smu->adev;
620 	uint64_t enabled_features;
621 	int feature_id;
622 
623 	if (__smu_get_enabled_features(smu, &enabled_features)) {
624 		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
625 		return 0;
626 	}
627 
628 	/*
629 	 * Renoir and Cyan Skillfish are assumed to have all features
630 	 * enabled. Also, since they have no feature_map available, the
631 	 * check here avoids the unwanted feature_map check below.
632 	 */
633 	if (enabled_features == ULLONG_MAX)
634 		return 1;
635 
636 	feature_id = smu_cmn_to_asic_specific_index(smu,
637 						    CMN2ASIC_MAPPING_FEATURE,
638 						    mask);
639 	if (feature_id < 0)
640 		return 0;
641 
642 	return test_bit(feature_id, (unsigned long *)&enabled_features);
643 }
644 
645 bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
646 				enum smu_clk_type clk_type)
647 {
648 	enum smu_feature_mask feature_id = 0;
649 
650 	switch (clk_type) {
651 	case SMU_MCLK:
652 	case SMU_UCLK:
653 		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
654 		break;
655 	case SMU_GFXCLK:
656 	case SMU_SCLK:
657 		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
658 		break;
659 	case SMU_SOCCLK:
660 		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
661 		break;
662 	case SMU_VCLK:
663 	case SMU_VCLK1:
664 		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
665 		break;
666 	case SMU_DCLK:
667 	case SMU_DCLK1:
668 		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
669 		break;
670 	case SMU_FCLK:
671 		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
672 		break;
673 	default:
674 		return true;
675 	}
676 
677 	if (!smu_cmn_feature_is_enabled(smu, feature_id))
678 		return false;
679 
680 	return true;
681 }
682 
683 int smu_cmn_get_enabled_mask(struct smu_context *smu,
684 			     uint64_t *feature_mask)
685 {
686 	uint32_t *feature_mask_high;
687 	uint32_t *feature_mask_low;
688 	int ret = 0, index = 0;
689 
690 	if (!feature_mask)
691 		return -EINVAL;
692 
693 	feature_mask_low = &((uint32_t *)feature_mask)[0];
694 	feature_mask_high = &((uint32_t *)feature_mask)[1];
695 
696 	index = smu_cmn_to_asic_specific_index(smu,
697 						CMN2ASIC_MAPPING_MSG,
698 						SMU_MSG_GetEnabledSmuFeatures);
699 	if (index > 0) {
700 		ret = smu_cmn_send_smc_msg_with_param(smu,
701 						      SMU_MSG_GetEnabledSmuFeatures,
702 						      0,
703 						      feature_mask_low);
704 		if (ret)
705 			return ret;
706 
707 		ret = smu_cmn_send_smc_msg_with_param(smu,
708 						      SMU_MSG_GetEnabledSmuFeatures,
709 						      1,
710 						      feature_mask_high);
711 	} else {
712 		ret = smu_cmn_send_smc_msg(smu,
713 					   SMU_MSG_GetEnabledSmuFeaturesHigh,
714 					   feature_mask_high);
715 		if (ret)
716 			return ret;
717 
718 		ret = smu_cmn_send_smc_msg(smu,
719 					   SMU_MSG_GetEnabledSmuFeaturesLow,
720 					   feature_mask_low);
721 	}
722 
723 	return ret;
724 }
725 
726 uint64_t smu_cmn_get_indep_throttler_status(
727 					const unsigned long dep_status,
728 					const uint8_t *throttler_map)
729 {
730 	uint64_t indep_status = 0;
731 	uint8_t dep_bit = 0;
732 
733 	for_each_set_bit(dep_bit, &dep_status, 32)
734 		indep_status |= 1ULL << throttler_map[dep_bit];
735 
736 	return indep_status;
737 }
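
/*
 * For example (hypothetical mapping): with throttler_map[0] = 5 and
 * throttler_map[3] = 40, a dep_status of 0x9 (bits 0 and 3 set) yields
 * (1ULL << 5) | (1ULL << 40) as the ASIC-independent status.
 */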
738 
739 int smu_cmn_feature_update_enable_state(struct smu_context *smu,
740 					uint64_t feature_mask,
741 					bool enabled)
742 {
743 	int ret = 0;
744 
745 	if (enabled) {
746 		ret = smu_cmn_send_smc_msg_with_param(smu,
747 						  SMU_MSG_EnableSmuFeaturesLow,
748 						  lower_32_bits(feature_mask),
749 						  NULL);
750 		if (ret)
751 			return ret;
752 		ret = smu_cmn_send_smc_msg_with_param(smu,
753 						  SMU_MSG_EnableSmuFeaturesHigh,
754 						  upper_32_bits(feature_mask),
755 						  NULL);
756 	} else {
757 		ret = smu_cmn_send_smc_msg_with_param(smu,
758 						  SMU_MSG_DisableSmuFeaturesLow,
759 						  lower_32_bits(feature_mask),
760 						  NULL);
761 		if (ret)
762 			return ret;
763 		ret = smu_cmn_send_smc_msg_with_param(smu,
764 						  SMU_MSG_DisableSmuFeaturesHigh,
765 						  upper_32_bits(feature_mask),
766 						  NULL);
767 	}
768 
769 	return ret;
770 }
771 
772 int smu_cmn_feature_set_enabled(struct smu_context *smu,
773 				enum smu_feature_mask mask,
774 				bool enable)
775 {
776 	int feature_id;
777 
778 	feature_id = smu_cmn_to_asic_specific_index(smu,
779 						    CMN2ASIC_MAPPING_FEATURE,
780 						    mask);
781 	if (feature_id < 0)
782 		return -EINVAL;
783 
784 	return smu_cmn_feature_update_enable_state(smu,
785 					       1ULL << feature_id,
786 					       enable);
787 }
788 
789 #undef __SMU_DUMMY_MAP
790 #define __SMU_DUMMY_MAP(fea)	#fea
791 static const char *__smu_feature_names[] = {
792 	SMU_FEATURE_MASKS
793 };
794 
795 static const char *smu_get_feature_name(struct smu_context *smu,
796 					enum smu_feature_mask feature)
797 {
798 	if (feature >= SMU_FEATURE_COUNT)
799 		return "unknown smu feature";
800 	return __smu_feature_names[feature];
801 }
802 
803 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
804 				   char *buf)
805 {
806 	int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
807 	uint64_t feature_mask;
808 	int i, feature_index;
809 	uint32_t count = 0;
810 	size_t size = 0;
811 
812 	if (__smu_get_enabled_features(smu, &feature_mask))
813 		return 0;
814 
815 	size =  sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
816 			upper_32_bits(feature_mask), lower_32_bits(feature_mask));
817 
818 	memset(sort_feature, -1, sizeof(sort_feature));
819 
820 	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
821 		feature_index = smu_cmn_to_asic_specific_index(smu,
822 							       CMN2ASIC_MAPPING_FEATURE,
823 							       i);
824 		if (feature_index < 0)
825 			continue;
826 
827 		sort_feature[feature_index] = i;
828 	}
829 
830 	size += sysfs_emit_at(buf, size, "%-2s. %-20s  %-3s : %-s\n",
831 			"No", "Feature", "Bit", "State");
832 
833 	for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
834 		if (sort_feature[feature_index] < 0)
835 			continue;
836 
837 		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
838 				count++,
839 				smu_get_feature_name(smu, sort_feature[feature_index]),
840 				feature_index,
841 				!!test_bit(feature_index, (unsigned long *)&feature_mask) ?
842 				"enabled" : "disabled");
843 	}
844 
845 	return size;
846 }
847 
848 int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
849 				uint64_t new_mask)
850 {
851 	int ret = 0;
852 	uint64_t feature_mask;
853 	uint64_t feature_2_enabled = 0;
854 	uint64_t feature_2_disabled = 0;
855 
856 	ret = __smu_get_enabled_features(smu, &feature_mask);
857 	if (ret)
858 		return ret;
859 
860 	feature_2_enabled  = ~feature_mask & new_mask;
861 	feature_2_disabled = feature_mask & ~new_mask;
862 
863 	if (feature_2_enabled) {
864 		ret = smu_cmn_feature_update_enable_state(smu,
865 							  feature_2_enabled,
866 							  true);
867 		if (ret)
868 			return ret;
869 	}
870 	if (feature_2_disabled) {
871 		ret = smu_cmn_feature_update_enable_state(smu,
872 							  feature_2_disabled,
873 							  false);
874 		if (ret)
875 			return ret;
876 	}
877 
878 	return ret;
879 }
880 
881 /**
882  * smu_cmn_disable_all_features_with_exception - disable all dpm features
883  *                                               except this specified by
884  *                                               except the one specified by
885  *
886  * @smu:               smu_context pointer
887  * @mask:              the dpm feature which should not be disabled
888  *                     SMU_FEATURE_COUNT: no exception, all dpm features
889  *                     to disable
890  *
891  * Returns:
892  * 0 on success or a negative error code on failure.
893  */
894 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
895 						enum smu_feature_mask mask)
896 {
897 	uint64_t features_to_disable = U64_MAX;
898 	int skipped_feature_id;
899 
900 	if (mask != SMU_FEATURE_COUNT) {
901 		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
902 								    CMN2ASIC_MAPPING_FEATURE,
903 								    mask);
904 		if (skipped_feature_id < 0)
905 			return -EINVAL;
906 
907 		features_to_disable &= ~(1ULL << skipped_feature_id);
908 	}
909 
910 	return smu_cmn_feature_update_enable_state(smu,
911 						   features_to_disable,
912 						   0);
913 }
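
/*
 * A minimal sketch of a hypothetical caller keeping only GFXCLK DPM enabled
 * (SMU_FEATURE_DPM_GFXCLK_BIT is a real mask bit; the wrapper is
 * illustrative only):
 *
 *	static int example_disable_all_but_gfxclk(struct smu_context *smu)
 *	{
 *		return smu_cmn_disable_all_features_with_exception(smu,
 *						SMU_FEATURE_DPM_GFXCLK_BIT);
 *	}
 */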
914 
915 int smu_cmn_get_smc_version(struct smu_context *smu,
916 			    uint32_t *if_version,
917 			    uint32_t *smu_version)
918 {
919 	int ret = 0;
920 
921 	if (!if_version && !smu_version)
922 		return -EINVAL;
923 
924 	if (smu->smc_fw_if_version &&
925 	    smu->smc_fw_version) {
926 		if (if_version)
927 			*if_version = smu->smc_fw_if_version;
928 
929 		if (smu_version)
930 			*smu_version = smu->smc_fw_version;
931 
932 		return 0;
933 	}
934 
935 	if (if_version) {
936 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
937 		if (ret)
938 			return ret;
939 
940 		smu->smc_fw_if_version = *if_version;
941 	}
942 
943 	if (smu_version) {
944 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
945 		if (ret)
946 			return ret;
947 
948 		smu->smc_fw_version = *smu_version;
949 	}
950 
951 	return ret;
952 }
953 
954 int smu_cmn_update_table(struct smu_context *smu,
955 			 enum smu_table_id table_index,
956 			 int argument,
957 			 void *table_data,
958 			 bool drv2smu)
959 {
960 	struct smu_table_context *smu_table = &smu->smu_table;
961 	struct amdgpu_device *adev = smu->adev;
962 	struct smu_table *table = &smu_table->driver_table;
963 	int table_id = smu_cmn_to_asic_specific_index(smu,
964 						      CMN2ASIC_MAPPING_TABLE,
965 						      table_index);
966 	uint32_t table_size;
967 	int ret = 0;
968 	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
969 		return -EINVAL;
970 
971 	table_size = smu_table->tables[table_index].size;
972 
973 	if (drv2smu) {
974 		memcpy(table->cpu_addr, table_data, table_size);
975 		/*
976 		 * Flush the HDP cache to ensure the content seen by the
977 		 * GPU is consistent with the CPU.
978 		 */
979 		amdgpu_asic_flush_hdp(adev, NULL);
980 	}
981 
982 	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
983 					  SMU_MSG_TransferTableDram2Smu :
984 					  SMU_MSG_TransferTableSmu2Dram,
985 					  table_id | ((argument & 0xFFFF) << 16),
986 					  NULL);
987 	if (ret)
988 		return ret;
989 
990 	if (!drv2smu) {
991 		amdgpu_asic_invalidate_hdp(adev, NULL);
992 		memcpy(table_data, table->cpu_addr, table_size);
993 	}
994 
995 	return 0;
996 }
997 
998 int smu_cmn_write_watermarks_table(struct smu_context *smu)
999 {
1000 	void *watermarks_table = smu->smu_table.watermarks_table;
1001 
1002 	if (!watermarks_table)
1003 		return -EINVAL;
1004 
1005 	return smu_cmn_update_table(smu,
1006 				    SMU_TABLE_WATERMARKS,
1007 				    0,
1008 				    watermarks_table,
1009 				    true);
1010 }
1011 
1012 int smu_cmn_write_pptable(struct smu_context *smu)
1013 {
1014 	void *pptable = smu->smu_table.driver_pptable;
1015 
1016 	return smu_cmn_update_table(smu,
1017 				    SMU_TABLE_PPTABLE,
1018 				    0,
1019 				    pptable,
1020 				    true);
1021 }
1022 
1023 int smu_cmn_get_metrics_table(struct smu_context *smu,
1024 			      void *metrics_table,
1025 			      bool bypass_cache)
1026 {
1027 	struct smu_table_context *smu_table = &smu->smu_table;
1028 	uint32_t table_size =
1029 		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
1030 	int ret = 0;
1031 
1032 	if (bypass_cache ||
1033 	    !smu_table->metrics_time ||
1034 	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
1035 		ret = smu_cmn_update_table(smu,
1036 				       SMU_TABLE_SMU_METRICS,
1037 				       0,
1038 				       smu_table->metrics_table,
1039 				       false);
1040 		if (ret) {
1041 			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
1042 			return ret;
1043 		}
1044 		smu_table->metrics_time = jiffies;
1045 	}
1046 
1047 	if (metrics_table)
1048 		memcpy(metrics_table, smu_table->metrics_table, table_size);
1049 
1050 	return 0;
1051 }
1052 
1053 int smu_cmn_get_combo_pptable(struct smu_context *smu)
1054 {
1055 	void *pptable = smu->smu_table.combo_pptable;
1056 
1057 	return smu_cmn_update_table(smu,
1058 				    SMU_TABLE_COMBO_PPTABLE,
1059 				    0,
1060 				    pptable,
1061 				    false);
1062 }
1063 
1064 int smu_cmn_set_mp1_state(struct smu_context *smu,
1065 			  enum pp_mp1_state mp1_state)
1066 {
1067 	enum smu_message_type msg;
1068 	int ret;
1069 
1070 	switch (mp1_state) {
1071 	case PP_MP1_STATE_SHUTDOWN:
1072 		msg = SMU_MSG_PrepareMp1ForShutdown;
1073 		break;
1074 	case PP_MP1_STATE_UNLOAD:
1075 		msg = SMU_MSG_PrepareMp1ForUnload;
1076 		break;
1077 	case PP_MP1_STATE_RESET:
1078 		msg = SMU_MSG_PrepareMp1ForReset;
1079 		break;
1080 	case PP_MP1_STATE_NONE:
1081 	default:
1082 		return 0;
1083 	}
1084 
1085 	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
1086 	if (ret)
1087 		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
1088 
1089 	return ret;
1090 }
1091 
1092 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
1093 {
1094 	struct pci_dev *p = NULL;
1095 	bool snd_driver_loaded;
1096 
1097 	/*
1098 	 * If the ASIC comes with no audio function, we always assume
1099 	 * it is "enabled".
1100 	 */
1101 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
1102 			adev->pdev->bus->number, 1);
1103 	if (!p)
1104 		return true;
1105 
1106 	snd_driver_loaded = pci_is_enabled(p) ? true : false;
1107 
1108 	pci_dev_put(p);
1109 
1110 	return snd_driver_loaded;
1111 }
1112 
1113 static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
1114 {
1115 	if (level < 0 || !(policy->level_mask & BIT(level)))
1116 		return "Invalid";
1117 
1118 	switch (level) {
1119 	case SOC_PSTATE_DEFAULT:
1120 		return "soc_pstate_default";
1121 	case SOC_PSTATE_0:
1122 		return "soc_pstate_0";
1123 	case SOC_PSTATE_1:
1124 		return "soc_pstate_1";
1125 	case SOC_PSTATE_2:
1126 		return "soc_pstate_2";
1127 	}
1128 
1129 	return "Invalid";
1130 }
1131 
1132 static struct smu_dpm_policy_desc pstate_policy_desc = {
1133 	.name = STR_SOC_PSTATE_POLICY,
1134 	.get_desc = smu_soc_policy_get_desc,
1135 };
1136 
1137 void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
1138 {
1139 	policy->desc = &pstate_policy_desc;
1140 }
1141 
1142 static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
1143 					   int level)
1144 {
1145 	if (level < 0 || !(policy->level_mask & BIT(level)))
1146 		return "Invalid";
1147 
1148 	switch (level) {
1149 	case XGMI_PLPD_DISALLOW:
1150 		return "plpd_disallow";
1151 	case XGMI_PLPD_DEFAULT:
1152 		return "plpd_default";
1153 	case XGMI_PLPD_OPTIMIZED:
1154 		return "plpd_optimized";
1155 	}
1156 
1157 	return "Invalid";
1158 }
1159 
1160 static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
1161 	.name = STR_XGMI_PLPD_POLICY,
1162 	.get_desc = smu_xgmi_plpd_policy_get_desc,
1163 };
1164 
1165 void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
1166 {
1167 	policy->desc = &xgmi_plpd_policy_desc;
1168 }
1169 
1170 void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
1171 				       u32 workload_mask,
1172 				       u32 *backend_workload_mask)
1173 {
1174 	int workload_type;
1175 	u32 profile_mode;
1176 
1177 	*backend_workload_mask = 0;
1178 
1179 	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
1180 		if (!(workload_mask & (1 << profile_mode)))
1181 			continue;
1182 
1183 		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1184 		workload_type = smu_cmn_to_asic_specific_index(smu,
1185 							       CMN2ASIC_MAPPING_WORKLOAD,
1186 							       profile_mode);
1187 
1188 		if (workload_type < 0)
1189 			continue;
1190 
1191 		*backend_workload_mask |= 1 << workload_type;
1192 	}
1193 }
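
/*
 * For example (hypothetical mapping): if PP_SMC_POWER_PROFILE_FULLSCREEN3D
 * is set in @workload_mask and the ASIC maps that profile to workload
 * type 1, then bit 1 is set in @backend_workload_mask; profiles without a
 * valid mapping are silently skipped.
 */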
1194