xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46 
47 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)
48 
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52 
53 static int psp_ring_init(struct psp_context *psp,
54 			 enum psp_ring_type ring_type)
55 {
56 	int ret = 0;
57 	struct psp_ring *ring;
58 	struct amdgpu_device *adev = psp->adev;
59 
60 	ring = &psp->km_ring;
61 
62 	ring->ring_type = ring_type;
63 
64 	/* allocate 4k Page of Local Frame Buffer memory for ring */
65 	ring->ring_size = 0x1000;
66 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 				      AMDGPU_GEM_DOMAIN_VRAM |
68 				      AMDGPU_GEM_DOMAIN_GTT,
69 				      &adev->firmware.rbuf,
70 				      &ring->ring_mem_mc_addr,
71 				      (void **)&ring->ring_mem);
72 	if (ret) {
73 		ring->ring_size = 0;
74 		return ret;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * Due to DF Cstate management centralized to PMFW, the firmware
82  * loading sequence will be updated as below:
83  *   - Load KDB
84  *   - Load SYS_DRV
85  *   - Load tOS
86  *   - Load PMFW
87  *   - Setup TMR
88  *   - Load other non-psp fw
89  *   - Load ASD
90  *   - Load XGMI/RAS/HDCP/DTM TA if any
91  *
92  * This new sequence is required for
93  *   - Arcturus and onwards
94  */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 	struct amdgpu_device *adev = psp->adev;
98 
99 	if (amdgpu_sriov_vf(adev)) {
100 		psp->pmfw_centralized_cstate_management = false;
101 		return;
102 	}
103 
104 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 	case IP_VERSION(11, 0, 0):
106 	case IP_VERSION(11, 0, 4):
107 	case IP_VERSION(11, 0, 5):
108 	case IP_VERSION(11, 0, 7):
109 	case IP_VERSION(11, 0, 9):
110 	case IP_VERSION(11, 0, 11):
111 	case IP_VERSION(11, 0, 12):
112 	case IP_VERSION(11, 0, 13):
113 	case IP_VERSION(13, 0, 0):
114 	case IP_VERSION(13, 0, 2):
115 	case IP_VERSION(13, 0, 7):
116 		psp->pmfw_centralized_cstate_management = true;
117 		break;
118 	default:
119 		psp->pmfw_centralized_cstate_management = false;
120 		break;
121 	}
122 }
123 
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 	struct amdgpu_device *adev = psp->adev;
127 	char ucode_prefix[30];
128 	int ret = 0;
129 
130 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131 
132 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 	case IP_VERSION(9, 0, 0):
134 	case IP_VERSION(11, 0, 7):
135 	case IP_VERSION(11, 0, 9):
136 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 		break;
139 	case IP_VERSION(13, 0, 2):
140 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 		ret = psp_init_cap_microcode(psp, ucode_prefix);
142 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 		break;
144 	case IP_VERSION(13, 0, 0):
145 		adev->virt.autoload_ucode_id = 0;
146 		break;
147 	case IP_VERSION(13, 0, 6):
148 	case IP_VERSION(13, 0, 14):
149 		ret = psp_init_cap_microcode(psp, ucode_prefix);
150 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 		break;
152 	case IP_VERSION(13, 0, 10):
153 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 		ret = psp_init_cap_microcode(psp, ucode_prefix);
155 		break;
156 	case IP_VERSION(13, 0, 12):
157 		ret = psp_init_ta_microcode(psp, ucode_prefix);
158 		break;
159 	default:
160 		return -EINVAL;
161 	}
162 	return ret;
163 }
164 
165 static int psp_early_init(struct amdgpu_ip_block *ip_block)
166 {
167 	struct amdgpu_device *adev = ip_block->adev;
168 	struct psp_context *psp = &adev->psp;
169 
170 	psp->autoload_supported = true;
171 	psp->boot_time_tmr = true;
172 
173 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
174 	case IP_VERSION(9, 0, 0):
175 		psp_v3_1_set_psp_funcs(psp);
176 		psp->autoload_supported = false;
177 		psp->boot_time_tmr = false;
178 		break;
179 	case IP_VERSION(10, 0, 0):
180 	case IP_VERSION(10, 0, 1):
181 		psp_v10_0_set_psp_funcs(psp);
182 		psp->autoload_supported = false;
183 		psp->boot_time_tmr = false;
184 		break;
185 	case IP_VERSION(11, 0, 2):
186 	case IP_VERSION(11, 0, 4):
187 		psp_v11_0_set_psp_funcs(psp);
188 		psp->autoload_supported = false;
189 		psp->boot_time_tmr = false;
190 		break;
191 	case IP_VERSION(11, 0, 0):
192 	case IP_VERSION(11, 0, 7):
193 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
194 		fallthrough;
195 	case IP_VERSION(11, 0, 5):
196 	case IP_VERSION(11, 0, 9):
197 	case IP_VERSION(11, 0, 11):
198 	case IP_VERSION(11, 5, 0):
199 	case IP_VERSION(11, 5, 2):
200 	case IP_VERSION(11, 0, 12):
201 	case IP_VERSION(11, 0, 13):
202 		psp_v11_0_set_psp_funcs(psp);
203 		psp->boot_time_tmr = false;
204 		break;
205 	case IP_VERSION(11, 0, 3):
206 	case IP_VERSION(12, 0, 1):
207 		psp_v12_0_set_psp_funcs(psp);
208 		psp->autoload_supported = false;
209 		psp->boot_time_tmr = false;
210 		break;
211 	case IP_VERSION(13, 0, 2):
212 		psp->boot_time_tmr = false;
213 		fallthrough;
214 	case IP_VERSION(13, 0, 6):
215 	case IP_VERSION(13, 0, 14):
216 		psp_v13_0_set_psp_funcs(psp);
217 		psp->autoload_supported = false;
218 		break;
219 	case IP_VERSION(13, 0, 12):
220 		psp_v13_0_set_psp_funcs(psp);
221 		psp->autoload_supported = false;
222 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
223 		break;
224 	case IP_VERSION(13, 0, 1):
225 	case IP_VERSION(13, 0, 3):
226 	case IP_VERSION(13, 0, 5):
227 	case IP_VERSION(13, 0, 8):
228 	case IP_VERSION(13, 0, 11):
229 	case IP_VERSION(14, 0, 0):
230 	case IP_VERSION(14, 0, 1):
231 	case IP_VERSION(14, 0, 4):
232 		psp_v13_0_set_psp_funcs(psp);
233 		psp->boot_time_tmr = false;
234 		break;
235 	case IP_VERSION(11, 0, 8):
236 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
237 			psp_v11_0_8_set_psp_funcs(psp);
238 		}
239 		psp->autoload_supported = false;
240 		psp->boot_time_tmr = false;
241 		break;
242 	case IP_VERSION(13, 0, 0):
243 	case IP_VERSION(13, 0, 7):
244 	case IP_VERSION(13, 0, 10):
245 		psp_v13_0_set_psp_funcs(psp);
246 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
247 		psp->boot_time_tmr = false;
248 		break;
249 	case IP_VERSION(13, 0, 4):
250 		psp_v13_0_4_set_psp_funcs(psp);
251 		psp->boot_time_tmr = false;
252 		break;
253 	case IP_VERSION(14, 0, 2):
254 	case IP_VERSION(14, 0, 3):
255 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
256 		psp_v14_0_set_psp_funcs(psp);
257 		break;
258 	case IP_VERSION(14, 0, 5):
259 		psp_v14_0_set_psp_funcs(psp);
260 		psp->boot_time_tmr = false;
261 		break;
262 	default:
263 		return -EINVAL;
264 	}
265 
266 	psp->adev = adev;
267 
268 	adev->psp_timeout = 20000;
269 
270 	psp_check_pmfw_centralized_cstate_management(psp);
271 
272 	if (amdgpu_sriov_vf(adev))
273 		return psp_init_sriov_microcode(psp);
274 	else
275 		return psp_init_microcode(psp);
276 }
277 
278 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
279 {
280 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
281 			      &mem_ctx->shared_buf);
282 	mem_ctx->shared_bo = NULL;
283 }
284 
285 static void psp_free_shared_bufs(struct psp_context *psp)
286 {
287 	void *tmr_buf;
288 	void **pptr;
289 
290 	/* free TMR memory buffer */
291 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
292 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
293 	psp->tmr_bo = NULL;
294 
295 	/* free xgmi shared memory */
296 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
297 
298 	/* free ras shared memory */
299 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
300 
301 	/* free hdcp shared memory */
302 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
303 
304 	/* free dtm shared memory */
305 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
306 
307 	/* free rap shared memory */
308 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
309 
310 	/* free securedisplay shared memory */
311 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
312 
313 
314 }
315 
316 static void psp_memory_training_fini(struct psp_context *psp)
317 {
318 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
319 
320 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
321 	kfree(ctx->sys_cache);
322 	ctx->sys_cache = NULL;
323 }
324 
325 static int psp_memory_training_init(struct psp_context *psp)
326 {
327 	int ret;
328 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
329 
330 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
331 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
332 		return 0;
333 	}
334 
335 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
336 	if (ctx->sys_cache == NULL) {
337 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
338 		ret = -ENOMEM;
339 		goto Err_out;
340 	}
341 
342 	dev_dbg(psp->adev->dev,
343 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
344 		ctx->train_data_size,
345 		ctx->p2c_train_data_offset,
346 		ctx->c2p_train_data_offset);
347 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
348 	return 0;
349 
350 Err_out:
351 	psp_memory_training_fini(psp);
352 	return ret;
353 }
354 
355 /*
356  * Helper funciton to query psp runtime database entry
357  *
358  * @adev: amdgpu_device pointer
359  * @entry_type: the type of psp runtime database entry
360  * @db_entry: runtime database entry pointer
361  *
362  * Return false if runtime database doesn't exit or entry is invalid
363  * or true if the specific database entry is found, and copy to @db_entry
364  */
365 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
366 				     enum psp_runtime_entry_type entry_type,
367 				     void *db_entry)
368 {
369 	uint64_t db_header_pos, db_dir_pos;
370 	struct psp_runtime_data_header db_header = {0};
371 	struct psp_runtime_data_directory db_dir = {0};
372 	bool ret = false;
373 	int i;
374 
375 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
376 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
377 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
378 		return false;
379 
380 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
381 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
382 
383 	/* read runtime db header from vram */
384 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
385 			sizeof(struct psp_runtime_data_header), false);
386 
387 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
388 		/* runtime db doesn't exist, exit */
389 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
390 		return false;
391 	}
392 
393 	/* read runtime database entry from vram */
394 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
395 			sizeof(struct psp_runtime_data_directory), false);
396 
397 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
398 		/* invalid db entry count, exit */
399 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
400 		return false;
401 	}
402 
403 	/* look up for requested entry type */
404 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
405 		if (db_dir.entry_list[i].entry_type == entry_type) {
406 			switch (entry_type) {
407 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
408 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
409 					/* invalid db entry size */
410 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
411 					return false;
412 				}
413 				/* read runtime database entry */
414 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
415 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
416 				ret = true;
417 				break;
418 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
419 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
420 					/* invalid db entry size */
421 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
422 					return false;
423 				}
424 				/* read runtime database entry */
425 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
426 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
427 				ret = true;
428 				break;
429 			default:
430 				ret = false;
431 				break;
432 			}
433 		}
434 	}
435 
436 	return ret;
437 }
438 
439 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
440 {
441 	struct amdgpu_device *adev = ip_block->adev;
442 	struct psp_context *psp = &adev->psp;
443 	int ret;
444 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
445 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
446 	struct psp_runtime_scpm_entry scpm_entry;
447 
448 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
449 	if (!psp->cmd) {
450 		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
451 		return -ENOMEM;
452 	}
453 
454 	adev->psp.xgmi_context.supports_extended_data =
455 		!adev->gmc.xgmi.connected_to_cpu &&
456 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
457 
458 	memset(&scpm_entry, 0, sizeof(scpm_entry));
459 	if ((psp_get_runtime_db_entry(adev,
460 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
461 				&scpm_entry)) &&
462 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
463 		adev->scpm_enabled = true;
464 		adev->scpm_status = scpm_entry.scpm_status;
465 	} else {
466 		adev->scpm_enabled = false;
467 		adev->scpm_status = SCPM_DISABLE;
468 	}
469 
470 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
471 
472 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
473 	if (psp_get_runtime_db_entry(adev,
474 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
475 				&boot_cfg_entry)) {
476 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
477 		if ((psp->boot_cfg_bitmask) &
478 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
479 			/* If psp runtime database exists, then
480 			 * only enable two stage memory training
481 			 * when TWO_STAGE_DRAM_TRAINING bit is set
482 			 * in runtime database
483 			 */
484 			mem_training_ctx->enable_mem_training = true;
485 		}
486 
487 	} else {
488 		/* If psp runtime database doesn't exist or is
489 		 * invalid, force enable two stage memory training
490 		 */
491 		mem_training_ctx->enable_mem_training = true;
492 	}
493 
494 	if (mem_training_ctx->enable_mem_training) {
495 		ret = psp_memory_training_init(psp);
496 		if (ret) {
497 			dev_err(adev->dev, "Failed to initialize memory training!\n");
498 			return ret;
499 		}
500 
501 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
502 		if (ret) {
503 			dev_err(adev->dev, "Failed to process memory training!\n");
504 			return ret;
505 		}
506 	}
507 
508 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
509 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
510 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
511 				      &psp->fw_pri_bo,
512 				      &psp->fw_pri_mc_addr,
513 				      &psp->fw_pri_buf);
514 	if (ret)
515 		return ret;
516 
517 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
518 				      AMDGPU_GEM_DOMAIN_VRAM |
519 				      AMDGPU_GEM_DOMAIN_GTT,
520 				      &psp->fence_buf_bo,
521 				      &psp->fence_buf_mc_addr,
522 				      &psp->fence_buf);
523 	if (ret)
524 		goto failed1;
525 
526 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
527 				      AMDGPU_GEM_DOMAIN_VRAM |
528 				      AMDGPU_GEM_DOMAIN_GTT,
529 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
530 				      (void **)&psp->cmd_buf_mem);
531 	if (ret)
532 		goto failed2;
533 
534 	return 0;
535 
536 failed2:
537 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
538 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
539 failed1:
540 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
541 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
542 	return ret;
543 }
544 
545 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
546 {
547 	struct amdgpu_device *adev = ip_block->adev;
548 	struct psp_context *psp = &adev->psp;
549 
550 	psp_memory_training_fini(psp);
551 
552 	amdgpu_ucode_release(&psp->sos_fw);
553 	amdgpu_ucode_release(&psp->asd_fw);
554 	amdgpu_ucode_release(&psp->ta_fw);
555 	amdgpu_ucode_release(&psp->cap_fw);
556 	amdgpu_ucode_release(&psp->toc_fw);
557 
558 	kfree(psp->cmd);
559 	psp->cmd = NULL;
560 
561 	psp_free_shared_bufs(psp);
562 
563 	if (psp->km_ring.ring_mem)
564 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
565 				      &psp->km_ring.ring_mem_mc_addr,
566 				      (void **)&psp->km_ring.ring_mem);
567 
568 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
569 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
570 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
571 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
572 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
573 			      (void **)&psp->cmd_buf_mem);
574 
575 	return 0;
576 }
577 
578 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
579 		 uint32_t mask, uint32_t flags)
580 {
581 	bool check_changed = flags & PSP_WAITREG_CHANGED;
582 	bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
583 	uint32_t val;
584 	int i;
585 	struct amdgpu_device *adev = psp->adev;
586 
587 	if (psp->adev->no_hw_access)
588 		return 0;
589 
590 	for (i = 0; i < adev->usec_timeout; i++) {
591 		val = RREG32(reg_index);
592 		if (check_changed) {
593 			if (val != reg_val)
594 				return 0;
595 		} else {
596 			if ((val & mask) == reg_val)
597 				return 0;
598 		}
599 		udelay(1);
600 	}
601 
602 	if (verbose)
603 		dev_err(adev->dev,
604 			"psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
605 			reg_index, mask, val, reg_val);
606 
607 	return -ETIME;
608 }
609 
610 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
611 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
612 {
613 	uint32_t val;
614 	int i;
615 	struct amdgpu_device *adev = psp->adev;
616 
617 	if (psp->adev->no_hw_access)
618 		return 0;
619 
620 	for (i = 0; i < msec_timeout; i++) {
621 		val = RREG32(reg_index);
622 		if ((val & mask) == reg_val)
623 			return 0;
624 		msleep(1);
625 	}
626 
627 	return -ETIME;
628 }
629 
630 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
631 {
632 	switch (cmd_id) {
633 	case GFX_CMD_ID_LOAD_TA:
634 		return "LOAD_TA";
635 	case GFX_CMD_ID_UNLOAD_TA:
636 		return "UNLOAD_TA";
637 	case GFX_CMD_ID_INVOKE_CMD:
638 		return "INVOKE_CMD";
639 	case GFX_CMD_ID_LOAD_ASD:
640 		return "LOAD_ASD";
641 	case GFX_CMD_ID_SETUP_TMR:
642 		return "SETUP_TMR";
643 	case GFX_CMD_ID_LOAD_IP_FW:
644 		return "LOAD_IP_FW";
645 	case GFX_CMD_ID_DESTROY_TMR:
646 		return "DESTROY_TMR";
647 	case GFX_CMD_ID_SAVE_RESTORE:
648 		return "SAVE_RESTORE_IP_FW";
649 	case GFX_CMD_ID_SETUP_VMR:
650 		return "SETUP_VMR";
651 	case GFX_CMD_ID_DESTROY_VMR:
652 		return "DESTROY_VMR";
653 	case GFX_CMD_ID_PROG_REG:
654 		return "PROG_REG";
655 	case GFX_CMD_ID_GET_FW_ATTESTATION:
656 		return "GET_FW_ATTESTATION";
657 	case GFX_CMD_ID_LOAD_TOC:
658 		return "ID_LOAD_TOC";
659 	case GFX_CMD_ID_AUTOLOAD_RLC:
660 		return "AUTOLOAD_RLC";
661 	case GFX_CMD_ID_BOOT_CFG:
662 		return "BOOT_CFG";
663 	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
664 		return "CONFIG_SQ_PERFMON";
665 	case GFX_CMD_ID_FB_FW_RESERV_ADDR:
666 		return "FB_FW_RESERV_ADDR";
667 	case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
668 		return "FB_FW_RESERV_EXT_ADDR";
669 	case GFX_CMD_ID_SRIOV_SPATIAL_PART:
670 		return "SPATIAL_PARTITION";
671 	case GFX_CMD_ID_FB_NPS_MODE:
672 		return "NPS_MODE_CHANGE";
673 	default:
674 		return "UNKNOWN CMD";
675 	}
676 }
677 
678 static bool psp_err_warn(struct psp_context *psp)
679 {
680 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
681 
682 	/* This response indicates reg list is already loaded */
683 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
684 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
685 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
686 	    cmd->resp.status == TEE_ERROR_CANCEL)
687 		return false;
688 
689 	return true;
690 }
691 
692 static int
693 psp_cmd_submit_buf(struct psp_context *psp,
694 		   struct amdgpu_firmware_info *ucode,
695 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
696 {
697 	int ret;
698 	int index;
699 	int timeout = psp->adev->psp_timeout;
700 	bool ras_intr = false;
701 	bool skip_unsupport = false;
702 
703 	if (psp->adev->no_hw_access)
704 		return 0;
705 
706 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
707 
708 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
709 
710 	index = atomic_inc_return(&psp->fence_value);
711 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
712 	if (ret) {
713 		atomic_dec(&psp->fence_value);
714 		goto exit;
715 	}
716 
717 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
718 	while (*((unsigned int *)psp->fence_buf) != index) {
719 		if (--timeout == 0)
720 			break;
721 		/*
722 		 * Shouldn't wait for timeout when err_event_athub occurs,
723 		 * because gpu reset thread triggered and lock resource should
724 		 * be released for psp resume sequence.
725 		 */
726 		ras_intr = amdgpu_ras_intr_triggered();
727 		if (ras_intr)
728 			break;
729 		usleep_range(10, 100);
730 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
731 	}
732 
733 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
734 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
735 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
736 
737 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
738 
739 	/* In some cases, psp response status is not 0 even there is no
740 	 * problem while the command is submitted. Some version of PSP FW
741 	 * doesn't write 0 to that field.
742 	 * So here we would like to only print a warning instead of an error
743 	 * during psp initialization to avoid breaking hw_init and it doesn't
744 	 * return -EINVAL.
745 	 */
746 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
747 		if (ucode)
748 			dev_warn(psp->adev->dev,
749 				 "failed to load ucode %s(0x%X) ",
750 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
751 		if (psp_err_warn(psp))
752 			dev_warn(
753 				psp->adev->dev,
754 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
755 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
756 				psp->cmd_buf_mem->cmd_id,
757 				psp->cmd_buf_mem->resp.status);
758 		/* If any firmware (including CAP) load fails under SRIOV, it should
759 		 * return failure to stop the VF from initializing.
760 		 * Also return failure in case of timeout
761 		 */
762 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
763 			ret = -EINVAL;
764 			goto exit;
765 		}
766 	}
767 
768 	if (ucode) {
769 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
770 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
771 	}
772 
773 exit:
774 	return ret;
775 }
776 
777 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
778 {
779 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
780 
781 	mutex_lock(&psp->mutex);
782 
783 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
784 
785 	return cmd;
786 }
787 
788 static void release_psp_cmd_buf(struct psp_context *psp)
789 {
790 	mutex_unlock(&psp->mutex);
791 }
792 
793 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
794 				 struct psp_gfx_cmd_resp *cmd,
795 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
796 {
797 	struct amdgpu_device *adev = psp->adev;
798 	uint32_t size = 0;
799 	uint64_t tmr_pa = 0;
800 
801 	if (tmr_bo) {
802 		size = amdgpu_bo_size(tmr_bo);
803 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
804 	}
805 
806 	if (amdgpu_sriov_vf(psp->adev))
807 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
808 	else
809 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
810 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
811 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
812 	cmd->cmd.cmd_setup_tmr.buf_size = size;
813 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
814 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
815 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
816 }
817 
818 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
819 				      uint64_t pri_buf_mc, uint32_t size)
820 {
821 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
822 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
823 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
824 	cmd->cmd.cmd_load_toc.toc_size = size;
825 }
826 
827 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */
828 static int psp_load_toc(struct psp_context *psp,
829 			uint32_t *tmr_size)
830 {
831 	int ret;
832 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
833 
834 	/* Copy toc to psp firmware private buffer */
835 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
836 
837 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
838 
839 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
840 				 psp->fence_buf_mc_addr);
841 	if (!ret)
842 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
843 
844 	release_psp_cmd_buf(psp);
845 
846 	return ret;
847 }
848 
849 /* Set up Trusted Memory Region */
850 static int psp_tmr_init(struct psp_context *psp)
851 {
852 	int ret = 0;
853 	int tmr_size;
854 	void *tmr_buf;
855 	void **pptr;
856 
857 	/*
858 	 * According to HW engineer, they prefer the TMR address be "naturally
859 	 * aligned" , e.g. the start address be an integer divide of TMR size.
860 	 *
861 	 * Note: this memory need be reserved till the driver
862 	 * uninitializes.
863 	 */
864 	tmr_size = PSP_TMR_SIZE(psp->adev);
865 
866 	/* For ASICs support RLC autoload, psp will parse the toc
867 	 * and calculate the total size of TMR needed
868 	 */
869 	if (!amdgpu_sriov_vf(psp->adev) &&
870 	    psp->toc.start_addr &&
871 	    psp->toc.size_bytes &&
872 	    psp->fw_pri_buf) {
873 		ret = psp_load_toc(psp, &tmr_size);
874 		if (ret) {
875 			dev_err(psp->adev->dev, "Failed to load toc\n");
876 			return ret;
877 		}
878 	}
879 
880 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
881 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
882 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
883 					      PSP_TMR_ALIGNMENT,
884 					      AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
885 					      &psp->tmr_bo, &psp->tmr_mc_addr,
886 					      pptr);
887 	}
888 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
889 		psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
890 
891 	return ret;
892 }
893 
894 static bool psp_skip_tmr(struct psp_context *psp)
895 {
896 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
897 	case IP_VERSION(11, 0, 9):
898 	case IP_VERSION(11, 0, 7):
899 	case IP_VERSION(13, 0, 2):
900 	case IP_VERSION(13, 0, 6):
901 	case IP_VERSION(13, 0, 10):
902 	case IP_VERSION(13, 0, 12):
903 	case IP_VERSION(13, 0, 14):
904 		return true;
905 	default:
906 		return false;
907 	}
908 }
909 
910 static int psp_tmr_load(struct psp_context *psp)
911 {
912 	int ret;
913 	struct psp_gfx_cmd_resp *cmd;
914 
915 	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
916 	 * Already set up by host driver.
917 	 */
918 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
919 		return 0;
920 
921 	cmd = acquire_psp_cmd_buf(psp);
922 
923 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
924 	if (psp->tmr_bo)
925 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
926 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
927 
928 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
929 				 psp->fence_buf_mc_addr);
930 
931 	release_psp_cmd_buf(psp);
932 
933 	return ret;
934 }
935 
936 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
937 					struct psp_gfx_cmd_resp *cmd)
938 {
939 	if (amdgpu_sriov_vf(psp->adev))
940 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
941 	else
942 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
943 }
944 
945 static int psp_tmr_unload(struct psp_context *psp)
946 {
947 	int ret;
948 	struct psp_gfx_cmd_resp *cmd;
949 
950 	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
951 	 * as TMR is not loaded at all
952 	 */
953 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
954 		return 0;
955 
956 	cmd = acquire_psp_cmd_buf(psp);
957 
958 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
959 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
960 
961 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
962 				 psp->fence_buf_mc_addr);
963 
964 	release_psp_cmd_buf(psp);
965 
966 	return ret;
967 }
968 
969 static int psp_tmr_terminate(struct psp_context *psp)
970 {
971 	return psp_tmr_unload(psp);
972 }
973 
974 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
975 					uint64_t *output_ptr)
976 {
977 	int ret;
978 	struct psp_gfx_cmd_resp *cmd;
979 
980 	if (!output_ptr)
981 		return -EINVAL;
982 
983 	if (amdgpu_sriov_vf(psp->adev))
984 		return 0;
985 
986 	cmd = acquire_psp_cmd_buf(psp);
987 
988 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
989 
990 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
991 				 psp->fence_buf_mc_addr);
992 
993 	if (!ret) {
994 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
995 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
996 	}
997 
998 	release_psp_cmd_buf(psp);
999 
1000 	return ret;
1001 }
1002 
1003 static int psp_get_fw_reservation_info(struct psp_context *psp,
1004 						   uint32_t cmd_id,
1005 						   uint64_t *addr,
1006 						   uint32_t *size)
1007 {
1008 	int ret;
1009 	uint32_t status;
1010 	struct psp_gfx_cmd_resp *cmd;
1011 
1012 	cmd = acquire_psp_cmd_buf(psp);
1013 
1014 	cmd->cmd_id = cmd_id;
1015 
1016 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1017 				 psp->fence_buf_mc_addr);
1018 	if (ret) {
1019 		release_psp_cmd_buf(psp);
1020 		return ret;
1021 	}
1022 
1023 	status = cmd->resp.status;
1024 	if (status == PSP_ERR_UNKNOWN_COMMAND) {
1025 		release_psp_cmd_buf(psp);
1026 		*addr = 0;
1027 		*size = 0;
1028 		return 0;
1029 	}
1030 
1031 	*addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
1032 		cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
1033 	*size = cmd->resp.uresp.fw_reserve_info.reserve_size;
1034 
1035 	release_psp_cmd_buf(psp);
1036 
1037 	return 0;
1038 }
1039 
1040 int psp_update_fw_reservation(struct psp_context *psp)
1041 {
1042 	int ret;
1043 	uint64_t reserv_addr, reserv_addr_ext;
1044 	uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
1045 	struct amdgpu_device *adev = psp->adev;
1046 
1047 	mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
1048 
1049 	if (amdgpu_sriov_vf(psp->adev))
1050 		return 0;
1051 
1052 	switch (mp0_ip_ver) {
1053 	case IP_VERSION(14, 0, 2):
1054 		if (adev->psp.sos.fw_version < 0x3b0e0d)
1055 			return 0;
1056 		break;
1057 
1058 	case IP_VERSION(14, 0, 3):
1059 		if (adev->psp.sos.fw_version < 0x3a0e14)
1060 			return 0;
1061 		break;
1062 
1063 	default:
1064 		return 0;
1065 	}
1066 
1067 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
1068 	if (ret)
1069 		return ret;
1070 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
1071 	if (ret)
1072 		return ret;
1073 
1074 	if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
1075 		dev_warn(adev->dev, "reserve fw region is not valid!\n");
1076 		return 0;
1077 	}
1078 
1079 	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1080 
1081 	reserv_size = roundup(reserv_size, SZ_1M);
1082 
1083 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
1084 	if (ret) {
1085 		dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
1086 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1087 		return ret;
1088 	}
1089 
1090 	reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
1091 
1092 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
1093 					 &adev->mman.fw_reserved_memory_extend, NULL);
1094 	if (ret) {
1095 		dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
1096 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
1097 		return ret;
1098 	}
1099 
1100 	return 0;
1101 }
1102 
1103 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
1104 {
1105 	struct psp_context *psp = &adev->psp;
1106 	struct psp_gfx_cmd_resp *cmd;
1107 	int ret;
1108 
1109 	if (amdgpu_sriov_vf(adev))
1110 		return 0;
1111 
1112 	cmd = acquire_psp_cmd_buf(psp);
1113 
1114 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1115 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
1116 
1117 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1118 	if (!ret) {
1119 		*boot_cfg =
1120 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1121 	}
1122 
1123 	release_psp_cmd_buf(psp);
1124 
1125 	return ret;
1126 }
1127 
1128 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1129 {
1130 	int ret;
1131 	struct psp_context *psp = &adev->psp;
1132 	struct psp_gfx_cmd_resp *cmd;
1133 
1134 	if (amdgpu_sriov_vf(adev))
1135 		return 0;
1136 
1137 	cmd = acquire_psp_cmd_buf(psp);
1138 
1139 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1140 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1141 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1142 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1143 
1144 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1145 
1146 	release_psp_cmd_buf(psp);
1147 
1148 	return ret;
1149 }
1150 
1151 static int psp_rl_load(struct amdgpu_device *adev)
1152 {
1153 	int ret;
1154 	struct psp_context *psp = &adev->psp;
1155 	struct psp_gfx_cmd_resp *cmd;
1156 
1157 	if (!is_psp_fw_valid(psp->rl))
1158 		return 0;
1159 
1160 	cmd = acquire_psp_cmd_buf(psp);
1161 
1162 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1163 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1164 
1165 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1166 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1167 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1168 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1169 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1170 
1171 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1172 
1173 	release_psp_cmd_buf(psp);
1174 
1175 	return ret;
1176 }
1177 
1178 int psp_memory_partition(struct psp_context *psp, int mode)
1179 {
1180 	struct psp_gfx_cmd_resp *cmd;
1181 	int ret;
1182 
1183 	if (amdgpu_sriov_vf(psp->adev))
1184 		return 0;
1185 
1186 	cmd = acquire_psp_cmd_buf(psp);
1187 
1188 	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1189 	cmd->cmd.cmd_memory_part.mode = mode;
1190 
1191 	dev_info(psp->adev->dev,
1192 		 "Requesting %d memory partition change through PSP", mode);
1193 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1194 	if (ret)
1195 		dev_err(psp->adev->dev,
1196 			"PSP request failed to change to NPS%d mode\n", mode);
1197 
1198 	release_psp_cmd_buf(psp);
1199 
1200 	return ret;
1201 }
1202 
1203 int psp_spatial_partition(struct psp_context *psp, int mode)
1204 {
1205 	struct psp_gfx_cmd_resp *cmd;
1206 	int ret;
1207 
1208 	if (amdgpu_sriov_vf(psp->adev))
1209 		return 0;
1210 
1211 	cmd = acquire_psp_cmd_buf(psp);
1212 
1213 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1214 	cmd->cmd.cmd_spatial_part.mode = mode;
1215 
1216 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1217 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1218 
1219 	release_psp_cmd_buf(psp);
1220 
1221 	return ret;
1222 }
1223 
1224 static int psp_asd_initialize(struct psp_context *psp)
1225 {
1226 	int ret;
1227 
1228 	/* If PSP version doesn't match ASD version, asd loading will be failed.
1229 	 * add workaround to bypass it for sriov now.
1230 	 * TODO: add version check to make it common
1231 	 */
1232 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1233 		return 0;
1234 
1235 	/* bypass asd if display hardware is not available */
1236 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1237 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1238 		return 0;
1239 
1240 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1241 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1242 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1243 
1244 	ret = psp_ta_load(psp, &psp->asd_context);
1245 	if (!ret)
1246 		psp->asd_context.initialized = true;
1247 
1248 	return ret;
1249 }
1250 
1251 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1252 				       uint32_t session_id)
1253 {
1254 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1255 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1256 }
1257 
1258 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1259 {
1260 	int ret;
1261 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1262 
1263 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1264 
1265 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1266 
1267 	context->resp_status = cmd->resp.status;
1268 
1269 	release_psp_cmd_buf(psp);
1270 
1271 	return ret;
1272 }
1273 
1274 static int psp_asd_terminate(struct psp_context *psp)
1275 {
1276 	int ret;
1277 
1278 	if (amdgpu_sriov_vf(psp->adev))
1279 		return 0;
1280 
1281 	if (!psp->asd_context.initialized)
1282 		return 0;
1283 
1284 	ret = psp_ta_unload(psp, &psp->asd_context);
1285 	if (!ret)
1286 		psp->asd_context.initialized = false;
1287 
1288 	return ret;
1289 }
1290 
1291 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1292 		uint32_t id, uint32_t value)
1293 {
1294 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1295 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1296 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1297 }
1298 
1299 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1300 		uint32_t value)
1301 {
1302 	struct psp_gfx_cmd_resp *cmd;
1303 	int ret = 0;
1304 
1305 	if (reg >= PSP_REG_LAST)
1306 		return -EINVAL;
1307 
1308 	cmd = acquire_psp_cmd_buf(psp);
1309 
1310 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1311 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1312 	if (ret)
1313 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1314 
1315 	release_psp_cmd_buf(psp);
1316 
1317 	return ret;
1318 }
1319 
1320 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1321 				     uint64_t ta_bin_mc,
1322 				     struct ta_context *context)
1323 {
1324 	cmd->cmd_id				= context->ta_load_type;
1325 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1326 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1327 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1328 
1329 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1330 		lower_32_bits(context->mem_context.shared_mc_addr);
1331 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1332 		upper_32_bits(context->mem_context.shared_mc_addr);
1333 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1334 }
1335 
1336 int psp_ta_init_shared_buf(struct psp_context *psp,
1337 				  struct ta_mem_context *mem_ctx)
1338 {
1339 	/*
1340 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1341 	 * physical) for ta to host memory
1342 	 */
1343 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1344 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1345 				      AMDGPU_GEM_DOMAIN_GTT,
1346 				      &mem_ctx->shared_bo,
1347 				      &mem_ctx->shared_mc_addr,
1348 				      &mem_ctx->shared_buf);
1349 }
1350 
1351 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1352 				       uint32_t ta_cmd_id,
1353 				       uint32_t session_id)
1354 {
1355 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1356 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1357 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1358 }
1359 
1360 int psp_ta_invoke(struct psp_context *psp,
1361 		  uint32_t ta_cmd_id,
1362 		  struct ta_context *context)
1363 {
1364 	int ret;
1365 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1366 
1367 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1368 
1369 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1370 				 psp->fence_buf_mc_addr);
1371 
1372 	context->resp_status = cmd->resp.status;
1373 
1374 	release_psp_cmd_buf(psp);
1375 
1376 	return ret;
1377 }
1378 
1379 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1380 {
1381 	int ret;
1382 	struct psp_gfx_cmd_resp *cmd;
1383 
1384 	cmd = acquire_psp_cmd_buf(psp);
1385 
1386 	psp_copy_fw(psp, context->bin_desc.start_addr,
1387 		    context->bin_desc.size_bytes);
1388 
1389 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
1390 		context->mem_context.shared_bo)
1391 		context->mem_context.shared_mc_addr =
1392 			amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
1393 
1394 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1395 
1396 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1397 				 psp->fence_buf_mc_addr);
1398 
1399 	context->resp_status = cmd->resp.status;
1400 
1401 	if (!ret)
1402 		context->session_id = cmd->resp.session_id;
1403 
1404 	release_psp_cmd_buf(psp);
1405 
1406 	return ret;
1407 }
1408 
1409 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1410 {
1411 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1412 }
1413 
1414 int psp_xgmi_terminate(struct psp_context *psp)
1415 {
1416 	int ret;
1417 	struct amdgpu_device *adev = psp->adev;
1418 
1419 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1420 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1421 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1422 	     adev->gmc.xgmi.connected_to_cpu))
1423 		return 0;
1424 
1425 	if (!psp->xgmi_context.context.initialized)
1426 		return 0;
1427 
1428 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1429 
1430 	psp->xgmi_context.context.initialized = false;
1431 
1432 	return ret;
1433 }
1434 
1435 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1436 {
1437 	struct ta_xgmi_shared_memory *xgmi_cmd;
1438 	int ret;
1439 
1440 	if (!psp->ta_fw ||
1441 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1442 	    !psp->xgmi_context.context.bin_desc.start_addr)
1443 		return -ENOENT;
1444 
1445 	if (!load_ta)
1446 		goto invoke;
1447 
1448 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1449 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1450 
1451 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1452 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1453 		if (ret)
1454 			return ret;
1455 	}
1456 
1457 	/* Load XGMI TA */
1458 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1459 	if (!ret)
1460 		psp->xgmi_context.context.initialized = true;
1461 	else
1462 		return ret;
1463 
1464 invoke:
1465 	/* Initialize XGMI session */
1466 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1467 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1468 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1469 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1470 
1471 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1472 	/* note down the capbility flag for XGMI TA */
1473 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1474 
1475 	return ret;
1476 }
1477 
1478 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1479 {
1480 	struct ta_xgmi_shared_memory *xgmi_cmd;
1481 	int ret;
1482 
1483 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1484 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1485 
1486 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1487 
1488 	/* Invoke xgmi ta to get hive id */
1489 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1490 	if (ret)
1491 		return ret;
1492 
1493 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1494 
1495 	return 0;
1496 }
1497 
1498 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1499 {
1500 	struct ta_xgmi_shared_memory *xgmi_cmd;
1501 	int ret;
1502 
1503 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1504 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1505 
1506 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1507 
1508 	/* Invoke xgmi ta to get the node id */
1509 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1510 	if (ret)
1511 		return ret;
1512 
1513 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1514 
1515 	return 0;
1516 }
1517 
1518 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1519 {
1520 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1521 			IP_VERSION(13, 0, 2) &&
1522 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1523 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1524 		       IP_VERSION(13, 0, 6);
1525 }
1526 
1527 /*
1528  * Chips that support extended topology information require the driver to
1529  * reflect topology information in the opposite direction.  This is
1530  * because the TA has already exceeded its link record limit and if the
1531  * TA holds bi-directional information, the driver would have to do
1532  * multiple fetches instead of just two.
1533  */
1534 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1535 					struct psp_xgmi_node_info node_info)
1536 {
1537 	struct amdgpu_device *mirror_adev;
1538 	struct amdgpu_hive_info *hive;
1539 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1540 	uint64_t dst_node_id = node_info.node_id;
1541 	uint8_t dst_num_hops = node_info.num_hops;
1542 	uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled;
1543 	uint8_t dst_num_links = node_info.num_links;
1544 
1545 	hive = amdgpu_get_xgmi_hive(psp->adev);
1546 	if (WARN_ON(!hive))
1547 		return;
1548 
1549 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1550 		struct psp_xgmi_topology_info *mirror_top_info;
1551 		int j;
1552 
1553 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1554 			continue;
1555 
1556 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1557 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1558 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1559 				continue;
1560 
1561 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1562 			mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled;
1563 			/* prevent 0 num_links value re-reflection since reflection
1564 			 * criteria is based on num_hops (direct or indirect).
1565 			 */
1566 			if (dst_num_links) {
1567 				mirror_top_info->nodes[j].num_links = dst_num_links;
1568 				/* swap src and dst due to frame of reference */
1569 				for (int k = 0; k < dst_num_links; k++) {
1570 					mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num =
1571 						node_info.port_num[k].dst_xgmi_port_num;
1572 					mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num =
1573 						node_info.port_num[k].src_xgmi_port_num;
1574 				}
1575 			}
1576 
1577 			break;
1578 		}
1579 
1580 		break;
1581 	}
1582 
1583 	amdgpu_put_xgmi_hive(hive);
1584 }
1585 
1586 int psp_xgmi_get_topology_info(struct psp_context *psp,
1587 			       int number_devices,
1588 			       struct psp_xgmi_topology_info *topology,
1589 			       bool get_extended_data)
1590 {
1591 	struct ta_xgmi_shared_memory *xgmi_cmd;
1592 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1593 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1594 	int i;
1595 	int ret;
1596 
1597 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1598 		return -EINVAL;
1599 
1600 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1601 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1602 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1603 
1604 	/* Fill in the shared memory with topology information as input */
1605 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1606 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1607 	topology_info_input->num_nodes = number_devices;
1608 
1609 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1610 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1611 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1612 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1613 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1614 	}
1615 
1616 	/* Invoke xgmi ta to get the topology information */
1617 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1618 	if (ret)
1619 		return ret;
1620 
1621 	/* Read the output topology information from the shared memory */
1622 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1623 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1624 	for (i = 0; i < topology->num_nodes; i++) {
1625 		/* extended data will either be 0 or equal to non-extended data */
1626 		if (topology_info_output->nodes[i].num_hops)
1627 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1628 
1629 		/* non-extended data gets everything here so no need to update */
1630 		if (!get_extended_data) {
1631 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1632 			topology->nodes[i].is_sharing_enabled =
1633 					topology_info_output->nodes[i].is_sharing_enabled;
1634 			topology->nodes[i].sdma_engine =
1635 					topology_info_output->nodes[i].sdma_engine;
1636 		}
1637 
1638 	}
1639 
1640 	/* Invoke xgmi ta again to get the link information */
1641 	if (psp_xgmi_peer_link_info_supported(psp)) {
1642 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1643 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1644 		bool requires_reflection =
1645 			(psp->xgmi_context.supports_extended_data &&
1646 			 get_extended_data) ||
1647 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1648 				IP_VERSION(13, 0, 6) ||
1649 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1650 				IP_VERSION(13, 0, 14) ||
1651 			amdgpu_sriov_vf(psp->adev);
1652 		bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG ||
1653 			amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev);
1654 
1655 		/* popluate the shared output buffer rather than the cmd input buffer
1656 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1657 		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
1658 		 * The same requirement for GET_EXTEND_PEER_LINKS command.
1659 		 */
1660 		if (ta_port_num_support) {
1661 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1662 
1663 			for (i = 0; i < topology->num_nodes; i++)
1664 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1665 
1666 			link_extend_info_output->num_nodes = topology->num_nodes;
1667 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1668 		} else {
1669 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1670 
1671 			for (i = 0; i < topology->num_nodes; i++)
1672 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1673 
1674 			link_info_output->num_nodes = topology->num_nodes;
1675 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1676 		}
1677 
1678 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1679 		if (ret)
1680 			return ret;
1681 
1682 		for (i = 0; i < topology->num_nodes; i++) {
1683 			uint8_t node_num_links = ta_port_num_support ?
1684 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1685 			/* accumulate num_links on extended data */
1686 			if (get_extended_data) {
1687 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1688 			} else {
1689 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1690 								topology->nodes[i].num_links : node_num_links;
1691 			}
1692 			/* popluate the connected port num info if supported and available */
1693 			if (ta_port_num_support && topology->nodes[i].num_links) {
1694 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1695 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1696 			}
1697 
1698 			/* reflect the topology information for bi-directionality */
1699 			if (requires_reflection && topology->nodes[i].num_hops)
1700 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1701 		}
1702 	}
1703 
1704 	return 0;
1705 }
1706 
1707 int psp_xgmi_set_topology_info(struct psp_context *psp,
1708 			       int number_devices,
1709 			       struct psp_xgmi_topology_info *topology)
1710 {
1711 	struct ta_xgmi_shared_memory *xgmi_cmd;
1712 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1713 	int i;
1714 
1715 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1716 		return -EINVAL;
1717 
1718 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1719 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1720 
1721 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1722 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1723 	topology_info_input->num_nodes = number_devices;
1724 
1725 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1726 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1727 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1728 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1729 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1730 	}
1731 
1732 	/* Invoke xgmi ta to set topology information */
1733 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1734 }
1735 
1736 // ras begin
1737 static void psp_ras_ta_check_status(struct psp_context *psp)
1738 {
1739 	struct ta_ras_shared_memory *ras_cmd =
1740 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1741 
1742 	switch (ras_cmd->ras_status) {
1743 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1744 		dev_warn(psp->adev->dev,
1745 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1746 		break;
1747 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1748 		dev_warn(psp->adev->dev,
1749 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1750 		break;
1751 	case TA_RAS_STATUS__SUCCESS:
1752 		break;
1753 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1754 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1755 			dev_warn(psp->adev->dev,
1756 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1757 		break;
1758 	default:
1759 		dev_warn(psp->adev->dev,
1760 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1761 		break;
1762 	}
1763 }
1764 
1765 static int psp_ras_send_cmd(struct psp_context *psp,
1766 		enum ras_command cmd_id, void *in, void *out)
1767 {
1768 	struct ta_ras_shared_memory *ras_cmd;
1769 	uint32_t cmd = cmd_id;
1770 	int ret = 0;
1771 
1772 	if (!in)
1773 		return -EINVAL;
1774 
1775 	mutex_lock(&psp->ras_context.mutex);
1776 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1777 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1778 
1779 	switch (cmd) {
1780 	case TA_RAS_COMMAND__ENABLE_FEATURES:
1781 	case TA_RAS_COMMAND__DISABLE_FEATURES:
1782 		memcpy(&ras_cmd->ras_in_message,
1783 			in, sizeof(ras_cmd->ras_in_message));
1784 		break;
1785 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1786 		memcpy(&ras_cmd->ras_in_message.trigger_error,
1787 			in, sizeof(ras_cmd->ras_in_message.trigger_error));
1788 		break;
1789 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1790 		memcpy(&ras_cmd->ras_in_message.address,
1791 			in, sizeof(ras_cmd->ras_in_message.address));
1792 		break;
1793 	default:
1794 		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1795 		ret = -EINVAL;
1796 		goto err_out;
1797 	}
1798 
1799 	ras_cmd->cmd_id = cmd;
1800 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1801 
1802 	switch (cmd) {
1803 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1804 		if (!ret && out)
1805 			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1806 		break;
1807 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1808 		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1809 			ret = -EINVAL;
1810 		else if (out)
1811 			memcpy(out,
1812 				&ras_cmd->ras_out_message.address,
1813 				sizeof(ras_cmd->ras_out_message.address));
1814 		break;
1815 	default:
1816 		break;
1817 	}
1818 
1819 err_out:
1820 	mutex_unlock(&psp->ras_context.mutex);
1821 
1822 	return ret;
1823 }
1824 
1825 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1826 {
1827 	struct ta_ras_shared_memory *ras_cmd;
1828 	int ret;
1829 
1830 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1831 
1832 	/*
1833 	 * TODO: bypass the loading in sriov for now
1834 	 */
1835 	if (amdgpu_sriov_vf(psp->adev))
1836 		return 0;
1837 
1838 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1839 
1840 	if (amdgpu_ras_intr_triggered())
1841 		return ret;
1842 
1843 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1844 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1845 		return -EINVAL;
1846 	}
1847 
1848 	if (!ret) {
1849 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1850 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1851 
1852 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1853 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1854 			dev_warn(psp->adev->dev,
1855 				 "RAS internal register access blocked\n");
1856 
1857 		psp_ras_ta_check_status(psp);
1858 	}
1859 
1860 	return ret;
1861 }
1862 
1863 int psp_ras_enable_features(struct psp_context *psp,
1864 		union ta_ras_cmd_input *info, bool enable)
1865 {
1866 	enum ras_command cmd_id;
1867 	int ret;
1868 
1869 	if (!psp->ras_context.context.initialized || !info)
1870 		return -EINVAL;
1871 
1872 	cmd_id = enable ?
1873 		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1874 	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1875 	if (ret)
1876 		return -EINVAL;
1877 
1878 	return 0;
1879 }
1880 
1881 int psp_ras_terminate(struct psp_context *psp)
1882 {
1883 	int ret;
1884 
1885 	/*
1886 	 * TODO: bypass the terminate in sriov for now
1887 	 */
1888 	if (amdgpu_sriov_vf(psp->adev))
1889 		return 0;
1890 
1891 	if (!psp->ras_context.context.initialized)
1892 		return 0;
1893 
1894 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1895 
1896 	psp->ras_context.context.initialized = false;
1897 
1898 	mutex_destroy(&psp->ras_context.mutex);
1899 
1900 	return ret;
1901 }
1902 
1903 int psp_ras_initialize(struct psp_context *psp)
1904 {
1905 	int ret;
1906 	uint32_t boot_cfg = 0xFF;
1907 	struct amdgpu_device *adev = psp->adev;
1908 	struct ta_ras_shared_memory *ras_cmd;
1909 
1910 	/*
1911 	 * TODO: bypass the initialize in sriov for now
1912 	 */
1913 	if (amdgpu_sriov_vf(adev))
1914 		return 0;
1915 
1916 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1917 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1918 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1919 		return 0;
1920 	}
1921 
1922 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1923 		/* query GECC enablement status from boot config
1924 		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1925 		 */
1926 		ret = psp_boot_config_get(adev, &boot_cfg);
1927 		if (ret)
1928 			dev_warn(adev->dev, "PSP get boot config failed\n");
1929 
1930 		if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
1931 		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1932 			dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
1933 			dev_warn(adev->dev,
1934 				"To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
1935 		} else {
1936 			if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
1937 				amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1938 				if (boot_cfg == 1) {
1939 					dev_info(adev->dev, "GECC is enabled\n");
1940 				} else {
1941 					/* enable GECC in next boot cycle if it is disabled
1942 					 * in boot config, or force enable GECC if failed to
1943 					 * get boot configuration
1944 					 */
1945 					ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1946 					if (ret)
1947 						dev_warn(adev->dev, "PSP set boot config failed\n");
1948 					else
1949 						dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1950 				}
1951 			} else {
1952 				if (!boot_cfg) {
1953 					if (!adev->ras_default_ecc_enabled &&
1954 					    amdgpu_ras_enable != 1 &&
1955 					    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1956 						dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
1957 					else
1958 						dev_info(adev->dev, "GECC is disabled\n");
1959 				} else {
1960 					/* disable GECC in next boot cycle if ras is
1961 					 * disabled by module parameter amdgpu_ras_enable
1962 					 * and/or amdgpu_ras_mask, or boot_config_get call
1963 					 * is failed
1964 					 */
1965 					ret = psp_boot_config_set(adev, 0);
1966 					if (ret)
1967 						dev_warn(adev->dev, "PSP set boot config failed\n");
1968 					else
1969 						dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1970 				}
1971 			}
1972 		}
1973 	}
1974 
1975 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1976 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1977 
1978 	if (!psp->ras_context.context.mem_context.shared_buf) {
1979 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1980 		if (ret)
1981 			return ret;
1982 	}
1983 
1984 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1985 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1986 
1987 	if (amdgpu_ras_is_poison_mode_supported(adev))
1988 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1989 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1990 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1991 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1992 		adev->gfx.xcc_mask;
1993 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1994 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1995 		ras_cmd->ras_in_message.init_flags.nps_mode =
1996 			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1997 	ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
1998 
1999 	ret = psp_ta_load(psp, &psp->ras_context.context);
2000 
2001 	if (!ret && !ras_cmd->ras_status) {
2002 		psp->ras_context.context.initialized = true;
2003 		mutex_init(&psp->ras_context.mutex);
2004 	} else {
2005 		if (ras_cmd->ras_status)
2006 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
2007 
2008 		/* fail to load RAS TA */
2009 		psp->ras_context.context.initialized = false;
2010 	}
2011 
2012 	return ret;
2013 }
2014 
2015 int psp_ras_trigger_error(struct psp_context *psp,
2016 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
2017 {
2018 	struct amdgpu_device *adev = psp->adev;
2019 	int ret;
2020 	uint32_t dev_mask;
2021 	uint32_t ras_status = 0;
2022 
2023 	if (!psp->ras_context.context.initialized || !info)
2024 		return -EINVAL;
2025 
2026 	switch (info->block_id) {
2027 	case TA_RAS_BLOCK__GFX:
2028 		dev_mask = GET_MASK(GC, instance_mask);
2029 		break;
2030 	case TA_RAS_BLOCK__SDMA:
2031 		dev_mask = GET_MASK(SDMA0, instance_mask);
2032 		break;
2033 	case TA_RAS_BLOCK__VCN:
2034 	case TA_RAS_BLOCK__JPEG:
2035 		dev_mask = GET_MASK(VCN, instance_mask);
2036 		break;
2037 	default:
2038 		dev_mask = instance_mask;
2039 		break;
2040 	}
2041 
2042 	/* reuse sub_block_index for backward compatibility */
2043 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
2044 	dev_mask &= AMDGPU_RAS_INST_MASK;
2045 	info->sub_block_index |= dev_mask;
2046 
2047 	ret = psp_ras_send_cmd(psp,
2048 			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
2049 	if (ret)
2050 		return -EINVAL;
2051 
2052 	/* If err_event_athub occurs error inject was successful, however
2053 	 *  return status from TA is no long reliable
2054 	 */
2055 	if (amdgpu_ras_intr_triggered())
2056 		return 0;
2057 
2058 	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
2059 		return -EACCES;
2060 	else if (ras_status)
2061 		return -EINVAL;
2062 
2063 	return 0;
2064 }
2065 
2066 int psp_ras_query_address(struct psp_context *psp,
2067 			  struct ta_ras_query_address_input *addr_in,
2068 			  struct ta_ras_query_address_output *addr_out)
2069 {
2070 	int ret;
2071 
2072 	if (!psp->ras_context.context.initialized ||
2073 		!addr_in || !addr_out)
2074 		return -EINVAL;
2075 
2076 	ret = psp_ras_send_cmd(psp,
2077 			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
2078 
2079 	return ret;
2080 }
2081 // ras end
2082 
2083 // HDCP start
2084 static int psp_hdcp_initialize(struct psp_context *psp)
2085 {
2086 	int ret;
2087 
2088 	/*
2089 	 * TODO: bypass the initialize in sriov for now
2090 	 */
2091 	if (amdgpu_sriov_vf(psp->adev))
2092 		return 0;
2093 
2094 	/* bypass hdcp initialization if dmu is harvested */
2095 	if (!amdgpu_device_has_display_hardware(psp->adev))
2096 		return 0;
2097 
2098 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
2099 	    !psp->hdcp_context.context.bin_desc.start_addr) {
2100 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
2101 		return 0;
2102 	}
2103 
2104 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
2105 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2106 
2107 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
2108 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
2109 		if (ret)
2110 			return ret;
2111 	}
2112 
2113 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
2114 	if (!ret) {
2115 		psp->hdcp_context.context.initialized = true;
2116 		mutex_init(&psp->hdcp_context.mutex);
2117 	}
2118 
2119 	return ret;
2120 }
2121 
2122 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2123 {
2124 	/*
2125 	 * TODO: bypass the loading in sriov for now
2126 	 */
2127 	if (amdgpu_sriov_vf(psp->adev))
2128 		return 0;
2129 
2130 	if (!psp->hdcp_context.context.initialized)
2131 		return 0;
2132 
2133 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
2134 }
2135 
2136 static int psp_hdcp_terminate(struct psp_context *psp)
2137 {
2138 	int ret;
2139 
2140 	/*
2141 	 * TODO: bypass the terminate in sriov for now
2142 	 */
2143 	if (amdgpu_sriov_vf(psp->adev))
2144 		return 0;
2145 
2146 	if (!psp->hdcp_context.context.initialized)
2147 		return 0;
2148 
2149 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2150 
2151 	psp->hdcp_context.context.initialized = false;
2152 
2153 	return ret;
2154 }
2155 // HDCP end
2156 
2157 // DTM start
2158 static int psp_dtm_initialize(struct psp_context *psp)
2159 {
2160 	int ret;
2161 
2162 	/*
2163 	 * TODO: bypass the initialize in sriov for now
2164 	 */
2165 	if (amdgpu_sriov_vf(psp->adev))
2166 		return 0;
2167 
2168 	/* bypass dtm initialization if dmu is harvested */
2169 	if (!amdgpu_device_has_display_hardware(psp->adev))
2170 		return 0;
2171 
2172 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
2173 	    !psp->dtm_context.context.bin_desc.start_addr) {
2174 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2175 		return 0;
2176 	}
2177 
2178 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2179 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2180 
2181 	if (!psp->dtm_context.context.mem_context.shared_buf) {
2182 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2183 		if (ret)
2184 			return ret;
2185 	}
2186 
2187 	ret = psp_ta_load(psp, &psp->dtm_context.context);
2188 	if (!ret) {
2189 		psp->dtm_context.context.initialized = true;
2190 		mutex_init(&psp->dtm_context.mutex);
2191 	}
2192 
2193 	return ret;
2194 }
2195 
2196 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2197 {
2198 	/*
2199 	 * TODO: bypass the loading in sriov for now
2200 	 */
2201 	if (amdgpu_sriov_vf(psp->adev))
2202 		return 0;
2203 
2204 	if (!psp->dtm_context.context.initialized)
2205 		return 0;
2206 
2207 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2208 }
2209 
2210 static int psp_dtm_terminate(struct psp_context *psp)
2211 {
2212 	int ret;
2213 
2214 	/*
2215 	 * TODO: bypass the terminate in sriov for now
2216 	 */
2217 	if (amdgpu_sriov_vf(psp->adev))
2218 		return 0;
2219 
2220 	if (!psp->dtm_context.context.initialized)
2221 		return 0;
2222 
2223 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
2224 
2225 	psp->dtm_context.context.initialized = false;
2226 
2227 	return ret;
2228 }
2229 // DTM end
2230 
2231 // RAP start
2232 static int psp_rap_initialize(struct psp_context *psp)
2233 {
2234 	int ret;
2235 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2236 
2237 	/*
2238 	 * TODO: bypass the initialize in sriov for now
2239 	 */
2240 	if (amdgpu_sriov_vf(psp->adev))
2241 		return 0;
2242 
2243 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2244 	    !psp->rap_context.context.bin_desc.start_addr) {
2245 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2246 		return 0;
2247 	}
2248 
2249 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2250 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2251 
2252 	if (!psp->rap_context.context.mem_context.shared_buf) {
2253 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2254 		if (ret)
2255 			return ret;
2256 	}
2257 
2258 	ret = psp_ta_load(psp, &psp->rap_context.context);
2259 	if (!ret) {
2260 		psp->rap_context.context.initialized = true;
2261 		mutex_init(&psp->rap_context.mutex);
2262 	} else
2263 		return ret;
2264 
2265 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2266 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2267 		psp_rap_terminate(psp);
2268 		/* free rap shared memory */
2269 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2270 
2271 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2272 			 ret, status);
2273 
2274 		return ret;
2275 	}
2276 
2277 	return 0;
2278 }
2279 
2280 static int psp_rap_terminate(struct psp_context *psp)
2281 {
2282 	int ret;
2283 
2284 	if (!psp->rap_context.context.initialized)
2285 		return 0;
2286 
2287 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2288 
2289 	psp->rap_context.context.initialized = false;
2290 
2291 	return ret;
2292 }
2293 
2294 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2295 {
2296 	struct ta_rap_shared_memory *rap_cmd;
2297 	int ret = 0;
2298 
2299 	if (!psp->rap_context.context.initialized)
2300 		return 0;
2301 
2302 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2303 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2304 		return -EINVAL;
2305 
2306 	mutex_lock(&psp->rap_context.mutex);
2307 
2308 	rap_cmd = (struct ta_rap_shared_memory *)
2309 		  psp->rap_context.context.mem_context.shared_buf;
2310 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2311 
2312 	rap_cmd->cmd_id = ta_cmd_id;
2313 	rap_cmd->validation_method_id = METHOD_A;
2314 
2315 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2316 	if (ret)
2317 		goto out_unlock;
2318 
2319 	if (status)
2320 		*status = rap_cmd->rap_status;
2321 
2322 out_unlock:
2323 	mutex_unlock(&psp->rap_context.mutex);
2324 
2325 	return ret;
2326 }
2327 // RAP end
2328 
2329 /* securedisplay start */
2330 static int psp_securedisplay_initialize(struct psp_context *psp)
2331 {
2332 	int ret;
2333 	struct ta_securedisplay_cmd *securedisplay_cmd;
2334 
2335 	/*
2336 	 * TODO: bypass the initialize in sriov for now
2337 	 */
2338 	if (amdgpu_sriov_vf(psp->adev))
2339 		return 0;
2340 
2341 	/* bypass securedisplay initialization if dmu is harvested */
2342 	if (!amdgpu_device_has_display_hardware(psp->adev))
2343 		return 0;
2344 
2345 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2346 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2347 		dev_info(psp->adev->dev,
2348 			 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
2349 		return 0;
2350 	}
2351 
2352 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2353 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2354 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2355 
2356 	if (!psp->securedisplay_context.context.initialized) {
2357 		ret = psp_ta_init_shared_buf(psp,
2358 					     &psp->securedisplay_context.context.mem_context);
2359 		if (ret)
2360 			return ret;
2361 	}
2362 
2363 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2364 	if (!ret && !psp->securedisplay_context.context.resp_status) {
2365 		psp->securedisplay_context.context.initialized = true;
2366 		mutex_init(&psp->securedisplay_context.mutex);
2367 	} else {
2368 		/* don't try again */
2369 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2370 		return ret;
2371 	}
2372 
2373 	mutex_lock(&psp->securedisplay_context.mutex);
2374 
2375 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2376 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2377 
2378 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2379 
2380 	mutex_unlock(&psp->securedisplay_context.mutex);
2381 
2382 	if (ret) {
2383 		psp_securedisplay_terminate(psp);
2384 		/* free securedisplay shared memory */
2385 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2386 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2387 		return -EINVAL;
2388 	}
2389 
2390 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2391 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2392 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2393 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2394 		/* don't try again */
2395 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2396 	}
2397 
2398 	return 0;
2399 }
2400 
2401 static int psp_securedisplay_terminate(struct psp_context *psp)
2402 {
2403 	int ret;
2404 
2405 	/*
2406 	 * TODO:bypass the terminate in sriov for now
2407 	 */
2408 	if (amdgpu_sriov_vf(psp->adev))
2409 		return 0;
2410 
2411 	if (!psp->securedisplay_context.context.initialized)
2412 		return 0;
2413 
2414 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2415 
2416 	psp->securedisplay_context.context.initialized = false;
2417 
2418 	return ret;
2419 }
2420 
2421 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2422 {
2423 	int ret;
2424 
2425 	if (!psp->securedisplay_context.context.initialized)
2426 		return -EINVAL;
2427 
2428 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2429 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2430 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2431 		return -EINVAL;
2432 
2433 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2434 
2435 	return ret;
2436 }
2437 /* SECUREDISPLAY end */
2438 
2439 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2440 {
2441 	struct psp_context *psp = &adev->psp;
2442 	int ret = 0;
2443 
2444 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2445 		ret = psp->funcs->wait_for_bootloader(psp);
2446 
2447 	return ret;
2448 }
2449 
2450 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2451 {
2452 	if (psp->funcs &&
2453 	    psp->funcs->get_ras_capability) {
2454 		return psp->funcs->get_ras_capability(psp);
2455 	} else {
2456 		return false;
2457 	}
2458 }
2459 
2460 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2461 {
2462 	struct psp_context *psp = &adev->psp;
2463 
2464 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2465 		return false;
2466 
2467 	if (psp->funcs && psp->funcs->is_reload_needed)
2468 		return psp->funcs->is_reload_needed(psp);
2469 
2470 	return false;
2471 }
2472 
2473 static void psp_update_gpu_addresses(struct amdgpu_device *adev)
2474 {
2475 	struct psp_context *psp = &adev->psp;
2476 
2477 	if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
2478 		psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
2479 		psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
2480 		psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
2481 	}
2482 	if (adev->firmware.rbuf && psp->km_ring.ring_mem)
2483 		psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
2484 }
2485 
2486 static int psp_hw_start(struct psp_context *psp)
2487 {
2488 	struct amdgpu_device *adev = psp->adev;
2489 	int ret;
2490 
2491 	if (amdgpu_virt_xgmi_migrate_enabled(adev))
2492 		psp_update_gpu_addresses(adev);
2493 
2494 	if (!amdgpu_sriov_vf(adev)) {
2495 		if ((is_psp_fw_valid(psp->kdb)) &&
2496 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2497 			ret = psp_bootloader_load_kdb(psp);
2498 			if (ret) {
2499 				dev_err(adev->dev, "PSP load kdb failed!\n");
2500 				return ret;
2501 			}
2502 		}
2503 
2504 		if ((is_psp_fw_valid(psp->spl)) &&
2505 		    (psp->funcs->bootloader_load_spl != NULL)) {
2506 			ret = psp_bootloader_load_spl(psp);
2507 			if (ret) {
2508 				dev_err(adev->dev, "PSP load spl failed!\n");
2509 				return ret;
2510 			}
2511 		}
2512 
2513 		if ((is_psp_fw_valid(psp->sys)) &&
2514 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2515 			ret = psp_bootloader_load_sysdrv(psp);
2516 			if (ret) {
2517 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2518 				return ret;
2519 			}
2520 		}
2521 
2522 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2523 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2524 			ret = psp_bootloader_load_soc_drv(psp);
2525 			if (ret) {
2526 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2527 				return ret;
2528 			}
2529 		}
2530 
2531 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2532 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2533 			ret = psp_bootloader_load_intf_drv(psp);
2534 			if (ret) {
2535 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2536 				return ret;
2537 			}
2538 		}
2539 
2540 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2541 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2542 			ret = psp_bootloader_load_dbg_drv(psp);
2543 			if (ret) {
2544 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2545 				return ret;
2546 			}
2547 		}
2548 
2549 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2550 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2551 			ret = psp_bootloader_load_ras_drv(psp);
2552 			if (ret) {
2553 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2554 				return ret;
2555 			}
2556 		}
2557 
2558 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2559 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2560 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2561 			if (ret) {
2562 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2563 				return ret;
2564 			}
2565 		}
2566 
2567 		if ((is_psp_fw_valid(psp->spdm_drv)) &&
2568 		    (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2569 			ret = psp_bootloader_load_spdm_drv(psp);
2570 			if (ret) {
2571 				dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2572 				return ret;
2573 			}
2574 		}
2575 
2576 		if ((is_psp_fw_valid(psp->sos)) &&
2577 		    (psp->funcs->bootloader_load_sos != NULL)) {
2578 			ret = psp_bootloader_load_sos(psp);
2579 			if (ret) {
2580 				dev_err(adev->dev, "PSP load sos failed!\n");
2581 				return ret;
2582 			}
2583 		}
2584 	}
2585 
2586 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2587 	if (ret) {
2588 		dev_err(adev->dev, "PSP create ring failed!\n");
2589 		return ret;
2590 	}
2591 
2592 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2593 		ret = psp_update_fw_reservation(psp);
2594 		if (ret) {
2595 			dev_err(adev->dev, "update fw reservation failed!\n");
2596 			return ret;
2597 		}
2598 	}
2599 
2600 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2601 		goto skip_pin_bo;
2602 
2603 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2604 		ret = psp_tmr_init(psp);
2605 		if (ret) {
2606 			dev_err(adev->dev, "PSP tmr init failed!\n");
2607 			return ret;
2608 		}
2609 	}
2610 
2611 skip_pin_bo:
2612 	/*
2613 	 * For ASICs with DF Cstate management centralized
2614 	 * to PMFW, TMR setup should be performed after PMFW
2615 	 * loaded and before other non-psp firmware loaded.
2616 	 */
2617 	if (psp->pmfw_centralized_cstate_management) {
2618 		ret = psp_load_smu_fw(psp);
2619 		if (ret)
2620 			return ret;
2621 	}
2622 
2623 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2624 		ret = psp_tmr_load(psp);
2625 		if (ret) {
2626 			dev_err(adev->dev, "PSP load tmr failed!\n");
2627 			return ret;
2628 		}
2629 	}
2630 
2631 	return 0;
2632 }
2633 
2634 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2635 			   enum psp_gfx_fw_type *type)
2636 {
2637 	switch (ucode->ucode_id) {
2638 	case AMDGPU_UCODE_ID_CAP:
2639 		*type = GFX_FW_TYPE_CAP;
2640 		break;
2641 	case AMDGPU_UCODE_ID_SDMA0:
2642 		*type = GFX_FW_TYPE_SDMA0;
2643 		break;
2644 	case AMDGPU_UCODE_ID_SDMA1:
2645 		*type = GFX_FW_TYPE_SDMA1;
2646 		break;
2647 	case AMDGPU_UCODE_ID_SDMA2:
2648 		*type = GFX_FW_TYPE_SDMA2;
2649 		break;
2650 	case AMDGPU_UCODE_ID_SDMA3:
2651 		*type = GFX_FW_TYPE_SDMA3;
2652 		break;
2653 	case AMDGPU_UCODE_ID_SDMA4:
2654 		*type = GFX_FW_TYPE_SDMA4;
2655 		break;
2656 	case AMDGPU_UCODE_ID_SDMA5:
2657 		*type = GFX_FW_TYPE_SDMA5;
2658 		break;
2659 	case AMDGPU_UCODE_ID_SDMA6:
2660 		*type = GFX_FW_TYPE_SDMA6;
2661 		break;
2662 	case AMDGPU_UCODE_ID_SDMA7:
2663 		*type = GFX_FW_TYPE_SDMA7;
2664 		break;
2665 	case AMDGPU_UCODE_ID_CP_MES:
2666 		*type = GFX_FW_TYPE_CP_MES;
2667 		break;
2668 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2669 		*type = GFX_FW_TYPE_MES_STACK;
2670 		break;
2671 	case AMDGPU_UCODE_ID_CP_MES1:
2672 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2673 		break;
2674 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2675 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2676 		break;
2677 	case AMDGPU_UCODE_ID_CP_CE:
2678 		*type = GFX_FW_TYPE_CP_CE;
2679 		break;
2680 	case AMDGPU_UCODE_ID_CP_PFP:
2681 		*type = GFX_FW_TYPE_CP_PFP;
2682 		break;
2683 	case AMDGPU_UCODE_ID_CP_ME:
2684 		*type = GFX_FW_TYPE_CP_ME;
2685 		break;
2686 	case AMDGPU_UCODE_ID_CP_MEC1:
2687 		*type = GFX_FW_TYPE_CP_MEC;
2688 		break;
2689 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2690 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2691 		break;
2692 	case AMDGPU_UCODE_ID_CP_MEC2:
2693 		*type = GFX_FW_TYPE_CP_MEC;
2694 		break;
2695 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2696 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2697 		break;
2698 	case AMDGPU_UCODE_ID_RLC_P:
2699 		*type = GFX_FW_TYPE_RLC_P;
2700 		break;
2701 	case AMDGPU_UCODE_ID_RLC_V:
2702 		*type = GFX_FW_TYPE_RLC_V;
2703 		break;
2704 	case AMDGPU_UCODE_ID_RLC_G:
2705 		*type = GFX_FW_TYPE_RLC_G;
2706 		break;
2707 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2708 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2709 		break;
2710 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2711 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2712 		break;
2713 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2714 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2715 		break;
2716 	case AMDGPU_UCODE_ID_RLC_IRAM:
2717 		*type = GFX_FW_TYPE_RLC_IRAM;
2718 		break;
2719 	case AMDGPU_UCODE_ID_RLC_DRAM:
2720 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2721 		break;
2722 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2723 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2724 		break;
2725 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2726 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2727 		break;
2728 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2729 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2730 		break;
2731 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2732 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2733 		break;
2734 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2735 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2736 		break;
2737 	case AMDGPU_UCODE_ID_SMC:
2738 		*type = GFX_FW_TYPE_SMU;
2739 		break;
2740 	case AMDGPU_UCODE_ID_PPTABLE:
2741 		*type = GFX_FW_TYPE_PPTABLE;
2742 		break;
2743 	case AMDGPU_UCODE_ID_UVD:
2744 		*type = GFX_FW_TYPE_UVD;
2745 		break;
2746 	case AMDGPU_UCODE_ID_UVD1:
2747 		*type = GFX_FW_TYPE_UVD1;
2748 		break;
2749 	case AMDGPU_UCODE_ID_VCE:
2750 		*type = GFX_FW_TYPE_VCE;
2751 		break;
2752 	case AMDGPU_UCODE_ID_VCN:
2753 		*type = GFX_FW_TYPE_VCN;
2754 		break;
2755 	case AMDGPU_UCODE_ID_VCN1:
2756 		*type = GFX_FW_TYPE_VCN1;
2757 		break;
2758 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2759 		*type = GFX_FW_TYPE_DMCU_ERAM;
2760 		break;
2761 	case AMDGPU_UCODE_ID_DMCU_INTV:
2762 		*type = GFX_FW_TYPE_DMCU_ISR;
2763 		break;
2764 	case AMDGPU_UCODE_ID_VCN0_RAM:
2765 		*type = GFX_FW_TYPE_VCN0_RAM;
2766 		break;
2767 	case AMDGPU_UCODE_ID_VCN1_RAM:
2768 		*type = GFX_FW_TYPE_VCN1_RAM;
2769 		break;
2770 	case AMDGPU_UCODE_ID_DMCUB:
2771 		*type = GFX_FW_TYPE_DMUB;
2772 		break;
2773 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2774 	case AMDGPU_UCODE_ID_SDMA_RS64:
2775 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2776 		break;
2777 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2778 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2779 		break;
2780 	case AMDGPU_UCODE_ID_IMU_I:
2781 		*type = GFX_FW_TYPE_IMU_I;
2782 		break;
2783 	case AMDGPU_UCODE_ID_IMU_D:
2784 		*type = GFX_FW_TYPE_IMU_D;
2785 		break;
2786 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2787 		*type = GFX_FW_TYPE_RS64_PFP;
2788 		break;
2789 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2790 		*type = GFX_FW_TYPE_RS64_ME;
2791 		break;
2792 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2793 		*type = GFX_FW_TYPE_RS64_MEC;
2794 		break;
2795 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2796 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2797 		break;
2798 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2799 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2800 		break;
2801 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2802 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2803 		break;
2804 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2805 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2806 		break;
2807 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2808 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2809 		break;
2810 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2811 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2812 		break;
2813 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2814 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2815 		break;
2816 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2817 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2818 		break;
2819 	case AMDGPU_UCODE_ID_VPE_CTX:
2820 		*type = GFX_FW_TYPE_VPEC_FW1;
2821 		break;
2822 	case AMDGPU_UCODE_ID_VPE_CTL:
2823 		*type = GFX_FW_TYPE_VPEC_FW2;
2824 		break;
2825 	case AMDGPU_UCODE_ID_VPE:
2826 		*type = GFX_FW_TYPE_VPE;
2827 		break;
2828 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2829 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2830 		break;
2831 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2832 		*type = GFX_FW_TYPE_UMSCH_DATA;
2833 		break;
2834 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2835 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2836 		break;
2837 	case AMDGPU_UCODE_ID_P2S_TABLE:
2838 		*type = GFX_FW_TYPE_P2S_TABLE;
2839 		break;
2840 	case AMDGPU_UCODE_ID_JPEG_RAM:
2841 		*type = GFX_FW_TYPE_JPEG_RAM;
2842 		break;
2843 	case AMDGPU_UCODE_ID_ISP:
2844 		*type = GFX_FW_TYPE_ISP;
2845 		break;
2846 	case AMDGPU_UCODE_ID_MAXIMUM:
2847 	default:
2848 		return -EINVAL;
2849 	}
2850 
2851 	return 0;
2852 }
2853 
2854 static void psp_print_fw_hdr(struct psp_context *psp,
2855 			     struct amdgpu_firmware_info *ucode)
2856 {
2857 	struct amdgpu_device *adev = psp->adev;
2858 	struct common_firmware_header *hdr;
2859 
2860 	switch (ucode->ucode_id) {
2861 	case AMDGPU_UCODE_ID_SDMA0:
2862 	case AMDGPU_UCODE_ID_SDMA1:
2863 	case AMDGPU_UCODE_ID_SDMA2:
2864 	case AMDGPU_UCODE_ID_SDMA3:
2865 	case AMDGPU_UCODE_ID_SDMA4:
2866 	case AMDGPU_UCODE_ID_SDMA5:
2867 	case AMDGPU_UCODE_ID_SDMA6:
2868 	case AMDGPU_UCODE_ID_SDMA7:
2869 		hdr = (struct common_firmware_header *)
2870 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2871 		amdgpu_ucode_print_sdma_hdr(hdr);
2872 		break;
2873 	case AMDGPU_UCODE_ID_CP_CE:
2874 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2875 		amdgpu_ucode_print_gfx_hdr(hdr);
2876 		break;
2877 	case AMDGPU_UCODE_ID_CP_PFP:
2878 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2879 		amdgpu_ucode_print_gfx_hdr(hdr);
2880 		break;
2881 	case AMDGPU_UCODE_ID_CP_ME:
2882 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2883 		amdgpu_ucode_print_gfx_hdr(hdr);
2884 		break;
2885 	case AMDGPU_UCODE_ID_CP_MEC1:
2886 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2887 		amdgpu_ucode_print_gfx_hdr(hdr);
2888 		break;
2889 	case AMDGPU_UCODE_ID_RLC_G:
2890 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2891 		amdgpu_ucode_print_rlc_hdr(hdr);
2892 		break;
2893 	case AMDGPU_UCODE_ID_SMC:
2894 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2895 		amdgpu_ucode_print_smc_hdr(hdr);
2896 		break;
2897 	default:
2898 		break;
2899 	}
2900 }
2901 
2902 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2903 				       struct amdgpu_firmware_info *ucode,
2904 				       struct psp_gfx_cmd_resp *cmd)
2905 {
2906 	int ret;
2907 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2908 
2909 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2910 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2911 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2912 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2913 
2914 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2915 	if (ret)
2916 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2917 
2918 	return ret;
2919 }
2920 
2921 int psp_execute_ip_fw_load(struct psp_context *psp,
2922 			   struct amdgpu_firmware_info *ucode)
2923 {
2924 	int ret = 0;
2925 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2926 
2927 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2928 	if (!ret) {
2929 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2930 					 psp->fence_buf_mc_addr);
2931 	}
2932 
2933 	release_psp_cmd_buf(psp);
2934 
2935 	return ret;
2936 }
2937 
2938 static int psp_load_p2s_table(struct psp_context *psp)
2939 {
2940 	int ret;
2941 	struct amdgpu_device *adev = psp->adev;
2942 	struct amdgpu_firmware_info *ucode =
2943 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2944 
2945 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2946 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2947 		return 0;
2948 
2949 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2950 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2951 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2952 								0x0036003C;
2953 		if (psp->sos.fw_version < supp_vers)
2954 			return 0;
2955 	}
2956 
2957 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2958 		return 0;
2959 
2960 	ret = psp_execute_ip_fw_load(psp, ucode);
2961 
2962 	return ret;
2963 }
2964 
2965 static int psp_load_smu_fw(struct psp_context *psp)
2966 {
2967 	int ret;
2968 	struct amdgpu_device *adev = psp->adev;
2969 	struct amdgpu_firmware_info *ucode =
2970 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2971 	struct amdgpu_ras *ras = psp->ras_context.ras;
2972 
2973 	/*
2974 	 * Skip SMU FW reloading in case of using BACO for runpm only,
2975 	 * as SMU is always alive.
2976 	 */
2977 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2978 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2979 		return 0;
2980 
2981 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2982 		return 0;
2983 
2984 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2985 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2986 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2987 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2988 		if (ret)
2989 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2990 	}
2991 
2992 	ret = psp_execute_ip_fw_load(psp, ucode);
2993 
2994 	if (ret)
2995 		dev_err(adev->dev, "PSP load smu failed!\n");
2996 
2997 	return ret;
2998 }
2999 
3000 static bool fw_load_skip_check(struct psp_context *psp,
3001 			       struct amdgpu_firmware_info *ucode)
3002 {
3003 	if (!ucode->fw || !ucode->ucode_size)
3004 		return true;
3005 
3006 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
3007 		return true;
3008 
3009 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3010 	    (psp_smu_reload_quirk(psp) ||
3011 	     psp->autoload_supported ||
3012 	     psp->pmfw_centralized_cstate_management))
3013 		return true;
3014 
3015 	if (amdgpu_sriov_vf(psp->adev) &&
3016 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
3017 		return true;
3018 
3019 	if (psp->autoload_supported &&
3020 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
3021 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
3022 		/* skip mec JT when autoload is enabled */
3023 		return true;
3024 
3025 	return false;
3026 }
3027 
3028 int psp_load_fw_list(struct psp_context *psp,
3029 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
3030 {
3031 	int ret = 0, i;
3032 	struct amdgpu_firmware_info *ucode;
3033 
3034 	for (i = 0; i < ucode_count; ++i) {
3035 		ucode = ucode_list[i];
3036 		psp_print_fw_hdr(psp, ucode);
3037 		ret = psp_execute_ip_fw_load(psp, ucode);
3038 		if (ret)
3039 			return ret;
3040 	}
3041 	return ret;
3042 }
3043 
3044 static int psp_load_non_psp_fw(struct psp_context *psp)
3045 {
3046 	int i, ret;
3047 	struct amdgpu_firmware_info *ucode;
3048 	struct amdgpu_device *adev = psp->adev;
3049 
3050 	if (psp->autoload_supported &&
3051 	    !psp->pmfw_centralized_cstate_management) {
3052 		ret = psp_load_smu_fw(psp);
3053 		if (ret)
3054 			return ret;
3055 	}
3056 
3057 	/* Load P2S table first if it's available */
3058 	psp_load_p2s_table(psp);
3059 
3060 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
3061 		ucode = &adev->firmware.ucode[i];
3062 
3063 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3064 		    !fw_load_skip_check(psp, ucode)) {
3065 			ret = psp_load_smu_fw(psp);
3066 			if (ret)
3067 				return ret;
3068 			continue;
3069 		}
3070 
3071 		if (fw_load_skip_check(psp, ucode))
3072 			continue;
3073 
3074 		if (psp->autoload_supported &&
3075 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3076 			     IP_VERSION(11, 0, 7) ||
3077 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3078 			     IP_VERSION(11, 0, 11) ||
3079 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3080 			     IP_VERSION(11, 0, 12)) &&
3081 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
3082 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
3083 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
3084 			/* PSP only receive one SDMA fw for sienna_cichlid,
3085 			 * as all four sdma fw are same
3086 			 */
3087 			continue;
3088 
3089 		psp_print_fw_hdr(psp, ucode);
3090 
3091 		ret = psp_execute_ip_fw_load(psp, ucode);
3092 		if (ret)
3093 			return ret;
3094 
3095 		/* Start rlc autoload after psp received all the gfx firmware */
3096 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
3097 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
3098 			ret = psp_rlc_autoload_start(psp);
3099 			if (ret) {
3100 				dev_err(adev->dev, "Failed to start rlc autoload\n");
3101 				return ret;
3102 			}
3103 		}
3104 	}
3105 
3106 	return 0;
3107 }
3108 
3109 static int psp_load_fw(struct amdgpu_device *adev)
3110 {
3111 	int ret;
3112 	struct psp_context *psp = &adev->psp;
3113 
3114 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3115 		/* should not destroy ring, only stop */
3116 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
3117 	} else {
3118 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
3119 
3120 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
3121 		if (ret) {
3122 			dev_err(adev->dev, "PSP ring init failed!\n");
3123 			goto failed;
3124 		}
3125 	}
3126 
3127 	ret = psp_hw_start(psp);
3128 	if (ret)
3129 		goto failed;
3130 
3131 	ret = psp_load_non_psp_fw(psp);
3132 	if (ret)
3133 		goto failed1;
3134 
3135 	ret = psp_asd_initialize(psp);
3136 	if (ret) {
3137 		dev_err(adev->dev, "PSP load asd failed!\n");
3138 		goto failed1;
3139 	}
3140 
3141 	ret = psp_rl_load(adev);
3142 	if (ret) {
3143 		dev_err(adev->dev, "PSP load RL failed!\n");
3144 		goto failed1;
3145 	}
3146 
3147 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3148 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
3149 			ret = psp_xgmi_initialize(psp, false, true);
3150 			/* Warning the XGMI seesion initialize failure
3151 			 * Instead of stop driver initialization
3152 			 */
3153 			if (ret)
3154 				dev_err(psp->adev->dev,
3155 					"XGMI: Failed to initialize XGMI session\n");
3156 		}
3157 	}
3158 
3159 	if (psp->ta_fw) {
3160 		ret = psp_ras_initialize(psp);
3161 		if (ret)
3162 			dev_err(psp->adev->dev,
3163 				"RAS: Failed to initialize RAS\n");
3164 
3165 		ret = psp_hdcp_initialize(psp);
3166 		if (ret)
3167 			dev_err(psp->adev->dev,
3168 				"HDCP: Failed to initialize HDCP\n");
3169 
3170 		ret = psp_dtm_initialize(psp);
3171 		if (ret)
3172 			dev_err(psp->adev->dev,
3173 				"DTM: Failed to initialize DTM\n");
3174 
3175 		ret = psp_rap_initialize(psp);
3176 		if (ret)
3177 			dev_err(psp->adev->dev,
3178 				"RAP: Failed to initialize RAP\n");
3179 
3180 		ret = psp_securedisplay_initialize(psp);
3181 		if (ret)
3182 			dev_err(psp->adev->dev,
3183 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3184 	}
3185 
3186 	return 0;
3187 
3188 failed1:
3189 	psp_free_shared_bufs(psp);
3190 failed:
3191 	/*
3192 	 * all cleanup jobs (xgmi terminate, ras terminate,
3193 	 * ring destroy, cmd/fence/fw buffers destory,
3194 	 * psp->cmd destory) are delayed to psp_hw_fini
3195 	 */
3196 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3197 	return ret;
3198 }
3199 
3200 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3201 {
3202 	int ret;
3203 	struct amdgpu_device *adev = ip_block->adev;
3204 
3205 	mutex_lock(&adev->firmware.mutex);
3206 
3207 	ret = amdgpu_ucode_init_bo(adev);
3208 	if (ret)
3209 		goto failed;
3210 
3211 	ret = psp_load_fw(adev);
3212 	if (ret) {
3213 		dev_err(adev->dev, "PSP firmware loading failed\n");
3214 		goto failed;
3215 	}
3216 
3217 	mutex_unlock(&adev->firmware.mutex);
3218 	return 0;
3219 
3220 failed:
3221 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3222 	mutex_unlock(&adev->firmware.mutex);
3223 	return -EINVAL;
3224 }
3225 
3226 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3227 {
3228 	struct amdgpu_device *adev = ip_block->adev;
3229 	struct psp_context *psp = &adev->psp;
3230 
3231 	if (psp->ta_fw) {
3232 		psp_ras_terminate(psp);
3233 		psp_securedisplay_terminate(psp);
3234 		psp_rap_terminate(psp);
3235 		psp_dtm_terminate(psp);
3236 		psp_hdcp_terminate(psp);
3237 
3238 		if (adev->gmc.xgmi.num_physical_nodes > 1)
3239 			psp_xgmi_terminate(psp);
3240 	}
3241 
3242 	psp_asd_terminate(psp);
3243 	psp_tmr_terminate(psp);
3244 
3245 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3246 
3247 	return 0;
3248 }
3249 
3250 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3251 {
3252 	int ret = 0;
3253 	struct amdgpu_device *adev = ip_block->adev;
3254 	struct psp_context *psp = &adev->psp;
3255 
3256 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3257 	    psp->xgmi_context.context.initialized) {
3258 		ret = psp_xgmi_terminate(psp);
3259 		if (ret) {
3260 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3261 			goto out;
3262 		}
3263 	}
3264 
3265 	if (psp->ta_fw) {
3266 		ret = psp_ras_terminate(psp);
3267 		if (ret) {
3268 			dev_err(adev->dev, "Failed to terminate ras ta\n");
3269 			goto out;
3270 		}
3271 		ret = psp_hdcp_terminate(psp);
3272 		if (ret) {
3273 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3274 			goto out;
3275 		}
3276 		ret = psp_dtm_terminate(psp);
3277 		if (ret) {
3278 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3279 			goto out;
3280 		}
3281 		ret = psp_rap_terminate(psp);
3282 		if (ret) {
3283 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3284 			goto out;
3285 		}
3286 		ret = psp_securedisplay_terminate(psp);
3287 		if (ret) {
3288 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3289 			goto out;
3290 		}
3291 	}
3292 
3293 	ret = psp_asd_terminate(psp);
3294 	if (ret) {
3295 		dev_err(adev->dev, "Failed to terminate asd\n");
3296 		goto out;
3297 	}
3298 
3299 	ret = psp_tmr_terminate(psp);
3300 	if (ret) {
3301 		dev_err(adev->dev, "Failed to terminate tmr\n");
3302 		goto out;
3303 	}
3304 
3305 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3306 	if (ret)
3307 		dev_err(adev->dev, "PSP ring stop failed\n");
3308 
3309 out:
3310 	return ret;
3311 }
3312 
3313 static int psp_resume(struct amdgpu_ip_block *ip_block)
3314 {
3315 	int ret;
3316 	struct amdgpu_device *adev = ip_block->adev;
3317 	struct psp_context *psp = &adev->psp;
3318 
3319 	dev_info(adev->dev, "PSP is resuming...\n");
3320 
3321 	if (psp->mem_train_ctx.enable_mem_training) {
3322 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3323 		if (ret) {
3324 			dev_err(adev->dev, "Failed to process memory training!\n");
3325 			return ret;
3326 		}
3327 	}
3328 
3329 	mutex_lock(&adev->firmware.mutex);
3330 
3331 	ret = amdgpu_ucode_init_bo(adev);
3332 	if (ret)
3333 		goto failed;
3334 
3335 	ret = psp_hw_start(psp);
3336 	if (ret)
3337 		goto failed;
3338 
3339 	ret = psp_load_non_psp_fw(psp);
3340 	if (ret)
3341 		goto failed;
3342 
3343 	ret = psp_asd_initialize(psp);
3344 	if (ret) {
3345 		dev_err(adev->dev, "PSP load asd failed!\n");
3346 		goto failed;
3347 	}
3348 
3349 	ret = psp_rl_load(adev);
3350 	if (ret) {
3351 		dev_err(adev->dev, "PSP load RL failed!\n");
3352 		goto failed;
3353 	}
3354 
3355 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3356 		ret = psp_xgmi_initialize(psp, false, true);
3357 		/* Warning the XGMI seesion initialize failure
3358 		 * Instead of stop driver initialization
3359 		 */
3360 		if (ret)
3361 			dev_err(psp->adev->dev,
3362 				"XGMI: Failed to initialize XGMI session\n");
3363 	}
3364 
3365 	if (psp->ta_fw) {
3366 		ret = psp_ras_initialize(psp);
3367 		if (ret)
3368 			dev_err(psp->adev->dev,
3369 				"RAS: Failed to initialize RAS\n");
3370 
3371 		ret = psp_hdcp_initialize(psp);
3372 		if (ret)
3373 			dev_err(psp->adev->dev,
3374 				"HDCP: Failed to initialize HDCP\n");
3375 
3376 		ret = psp_dtm_initialize(psp);
3377 		if (ret)
3378 			dev_err(psp->adev->dev,
3379 				"DTM: Failed to initialize DTM\n");
3380 
3381 		ret = psp_rap_initialize(psp);
3382 		if (ret)
3383 			dev_err(psp->adev->dev,
3384 				"RAP: Failed to initialize RAP\n");
3385 
3386 		ret = psp_securedisplay_initialize(psp);
3387 		if (ret)
3388 			dev_err(psp->adev->dev,
3389 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3390 	}
3391 
3392 	mutex_unlock(&adev->firmware.mutex);
3393 
3394 	return 0;
3395 
3396 failed:
3397 	dev_err(adev->dev, "PSP resume failed\n");
3398 	mutex_unlock(&adev->firmware.mutex);
3399 	return ret;
3400 }
3401 
3402 int psp_gpu_reset(struct amdgpu_device *adev)
3403 {
3404 	int ret;
3405 
3406 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3407 		return 0;
3408 
3409 	mutex_lock(&adev->psp.mutex);
3410 	ret = psp_mode1_reset(&adev->psp);
3411 	mutex_unlock(&adev->psp.mutex);
3412 
3413 	return ret;
3414 }
3415 
3416 int psp_rlc_autoload_start(struct psp_context *psp)
3417 {
3418 	int ret;
3419 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3420 
3421 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3422 
3423 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3424 				 psp->fence_buf_mc_addr);
3425 
3426 	release_psp_cmd_buf(psp);
3427 
3428 	return ret;
3429 }
3430 
3431 int psp_ring_cmd_submit(struct psp_context *psp,
3432 			uint64_t cmd_buf_mc_addr,
3433 			uint64_t fence_mc_addr,
3434 			int index)
3435 {
3436 	unsigned int psp_write_ptr_reg = 0;
3437 	struct psp_gfx_rb_frame *write_frame;
3438 	struct psp_ring *ring = &psp->km_ring;
3439 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3440 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3441 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3442 	struct amdgpu_device *adev = psp->adev;
3443 	uint32_t ring_size_dw = ring->ring_size / 4;
3444 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3445 
3446 	/* KM (GPCOM) prepare write pointer */
3447 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3448 
3449 	/* Update KM RB frame pointer to new frame */
3450 	/* write_frame ptr increments by size of rb_frame in bytes */
3451 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3452 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3453 		write_frame = ring_buffer_start;
3454 	else
3455 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3456 	/* Check invalid write_frame ptr address */
3457 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3458 		dev_err(adev->dev,
3459 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3460 			ring_buffer_start, ring_buffer_end, write_frame);
3461 		dev_err(adev->dev,
3462 			"write_frame is pointing to address out of bounds\n");
3463 		return -EINVAL;
3464 	}
3465 
3466 	/* Initialize KM RB frame */
3467 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3468 
3469 	/* Update KM RB frame */
3470 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3471 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3472 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3473 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3474 	write_frame->fence_value = index;
3475 	amdgpu_device_flush_hdp(adev, NULL);
3476 
3477 	/* Update the write Pointer in DWORDs */
3478 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3479 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3480 	return 0;
3481 }
3482 
3483 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3484 {
3485 	struct amdgpu_device *adev = psp->adev;
3486 	const struct psp_firmware_header_v1_0 *asd_hdr;
3487 	int err = 0;
3488 
3489 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3490 				   "amdgpu/%s_asd.bin", chip_name);
3491 	if (err)
3492 		goto out;
3493 
3494 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3495 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3496 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3497 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3498 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3499 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3500 	return 0;
3501 out:
3502 	amdgpu_ucode_release(&adev->psp.asd_fw);
3503 	return err;
3504 }
3505 
3506 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3507 {
3508 	struct amdgpu_device *adev = psp->adev;
3509 	const struct psp_firmware_header_v1_0 *toc_hdr;
3510 	int err = 0;
3511 
3512 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3513 				   "amdgpu/%s_toc.bin", chip_name);
3514 	if (err)
3515 		goto out;
3516 
3517 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3518 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3519 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3520 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3521 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3522 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3523 	return 0;
3524 out:
3525 	amdgpu_ucode_release(&adev->psp.toc_fw);
3526 	return err;
3527 }
3528 
3529 static int parse_sos_bin_descriptor(struct psp_context *psp,
3530 				   const struct psp_fw_bin_desc *desc,
3531 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3532 {
3533 	uint8_t *ucode_start_addr  = NULL;
3534 
3535 	if (!psp || !desc || !sos_hdr)
3536 		return -EINVAL;
3537 
3538 	ucode_start_addr  = (uint8_t *)sos_hdr +
3539 			    le32_to_cpu(desc->offset_bytes) +
3540 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3541 
3542 	switch (desc->fw_type) {
3543 	case PSP_FW_TYPE_PSP_SOS:
3544 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3545 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3546 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3547 		psp->sos.start_addr	   = ucode_start_addr;
3548 		break;
3549 	case PSP_FW_TYPE_PSP_SYS_DRV:
3550 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3551 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3552 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3553 		psp->sys.start_addr        = ucode_start_addr;
3554 		break;
3555 	case PSP_FW_TYPE_PSP_KDB:
3556 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3557 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3558 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3559 		psp->kdb.start_addr        = ucode_start_addr;
3560 		break;
3561 	case PSP_FW_TYPE_PSP_TOC:
3562 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3563 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3564 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3565 		psp->toc.start_addr        = ucode_start_addr;
3566 		break;
3567 	case PSP_FW_TYPE_PSP_SPL:
3568 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3569 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3570 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3571 		psp->spl.start_addr        = ucode_start_addr;
3572 		break;
3573 	case PSP_FW_TYPE_PSP_RL:
3574 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3575 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3576 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3577 		psp->rl.start_addr         = ucode_start_addr;
3578 		break;
3579 	case PSP_FW_TYPE_PSP_SOC_DRV:
3580 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3581 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3582 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3583 		psp->soc_drv.start_addr         = ucode_start_addr;
3584 		break;
3585 	case PSP_FW_TYPE_PSP_INTF_DRV:
3586 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3587 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3588 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3589 		psp->intf_drv.start_addr        = ucode_start_addr;
3590 		break;
3591 	case PSP_FW_TYPE_PSP_DBG_DRV:
3592 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3593 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3594 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3595 		psp->dbg_drv.start_addr         = ucode_start_addr;
3596 		break;
3597 	case PSP_FW_TYPE_PSP_RAS_DRV:
3598 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3599 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3600 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3601 		psp->ras_drv.start_addr         = ucode_start_addr;
3602 		break;
3603 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3604 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3605 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3606 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3607 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3608 		break;
3609 	case PSP_FW_TYPE_PSP_SPDM_DRV:
3610 		psp->spdm_drv.fw_version	= le32_to_cpu(desc->fw_version);
3611 		psp->spdm_drv.feature_version	= le32_to_cpu(desc->fw_version);
3612 		psp->spdm_drv.size_bytes	= le32_to_cpu(desc->size_bytes);
3613 		psp->spdm_drv.start_addr	= ucode_start_addr;
3614 		break;
3615 	default:
3616 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3617 		break;
3618 	}
3619 
3620 	return 0;
3621 }
3622 
3623 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3624 {
3625 	const struct psp_firmware_header_v1_0 *sos_hdr;
3626 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3627 	uint8_t *ucode_array_start_addr;
3628 
3629 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3630 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3631 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3632 
3633 	if (adev->gmc.xgmi.connected_to_cpu ||
3634 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3635 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3636 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3637 
3638 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3639 		adev->psp.sys.start_addr = ucode_array_start_addr;
3640 
3641 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3642 		adev->psp.sos.start_addr = ucode_array_start_addr +
3643 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3644 	} else {
3645 		/* Load alternate PSP SOS FW */
3646 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3647 
3648 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3649 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3650 
3651 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3652 		adev->psp.sys.start_addr = ucode_array_start_addr +
3653 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3654 
3655 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3656 		adev->psp.sos.start_addr = ucode_array_start_addr +
3657 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3658 	}
3659 
3660 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3661 		dev_warn(adev->dev, "PSP SOS FW not available");
3662 		return -EINVAL;
3663 	}
3664 
3665 	return 0;
3666 }
3667 
3668 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3669 {
3670 	struct amdgpu_device *adev = psp->adev;
3671 	const struct psp_firmware_header_v1_0 *sos_hdr;
3672 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3673 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3674 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3675 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3676 	const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3677 	int fw_index, fw_bin_count, start_index = 0;
3678 	const struct psp_fw_bin_desc *fw_bin;
3679 	uint8_t *ucode_array_start_addr;
3680 	int err = 0;
3681 
3682 	if (amdgpu_is_kicker_fw(adev))
3683 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3684 					   "amdgpu/%s_sos_kicker.bin", chip_name);
3685 	else
3686 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3687 					   "amdgpu/%s_sos.bin", chip_name);
3688 	if (err)
3689 		goto out;
3690 
3691 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3692 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3693 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3694 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3695 
3696 	switch (sos_hdr->header.header_version_major) {
3697 	case 1:
3698 		err = psp_init_sos_base_fw(adev);
3699 		if (err)
3700 			goto out;
3701 
3702 		if (sos_hdr->header.header_version_minor == 1) {
3703 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3704 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3705 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3706 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3707 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3708 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3709 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3710 		}
3711 		if (sos_hdr->header.header_version_minor == 2) {
3712 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3713 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3714 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3715 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3716 		}
3717 		if (sos_hdr->header.header_version_minor == 3) {
3718 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3719 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3720 			adev->psp.toc.start_addr = ucode_array_start_addr +
3721 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3722 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3723 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3724 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3725 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3726 			adev->psp.spl.start_addr = ucode_array_start_addr +
3727 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3728 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3729 			adev->psp.rl.start_addr = ucode_array_start_addr +
3730 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3731 		}
3732 		break;
3733 	case 2:
3734 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3735 
3736 		fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3737 
3738 		if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3739 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3740 			err = -EINVAL;
3741 			goto out;
3742 		}
3743 
3744 		if (sos_hdr_v2_0->header.header_version_minor == 1) {
3745 			sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3746 
3747 			fw_bin = sos_hdr_v2_1->psp_fw_bin;
3748 
3749 			if (psp_is_aux_sos_load_required(psp))
3750 				start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3751 			else
3752 				fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3753 
3754 		} else {
3755 			fw_bin = sos_hdr_v2_0->psp_fw_bin;
3756 		}
3757 
3758 		for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3759 			err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3760 						       sos_hdr_v2_0);
3761 			if (err)
3762 				goto out;
3763 		}
3764 		break;
3765 	default:
3766 		dev_err(adev->dev,
3767 			"unsupported psp sos firmware\n");
3768 		err = -EINVAL;
3769 		goto out;
3770 	}
3771 
3772 	return 0;
3773 out:
3774 	amdgpu_ucode_release(&adev->psp.sos_fw);
3775 
3776 	return err;
3777 }
3778 
3779 static bool is_ta_fw_applicable(struct psp_context *psp,
3780 			     const struct psp_fw_bin_desc *desc)
3781 {
3782 	struct amdgpu_device *adev = psp->adev;
3783 	uint32_t fw_version;
3784 
3785 	switch (desc->fw_type) {
3786 	case TA_FW_TYPE_PSP_XGMI:
3787 	case TA_FW_TYPE_PSP_XGMI_AUX:
3788 		/* for now, AUX TA only exists on 13.0.6 ta bin,
3789 		 * from v20.00.0x.14
3790 		 */
3791 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3792 		    IP_VERSION(13, 0, 6)) {
3793 			fw_version = le32_to_cpu(desc->fw_version);
3794 
3795 			if (adev->flags & AMD_IS_APU &&
3796 			    (fw_version & 0xff) >= 0x14)
3797 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3798 			else
3799 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3800 		}
3801 		break;
3802 	default:
3803 		break;
3804 	}
3805 
3806 	return true;
3807 }
3808 
3809 static int parse_ta_bin_descriptor(struct psp_context *psp,
3810 				   const struct psp_fw_bin_desc *desc,
3811 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3812 {
3813 	uint8_t *ucode_start_addr  = NULL;
3814 
3815 	if (!psp || !desc || !ta_hdr)
3816 		return -EINVAL;
3817 
3818 	if (!is_ta_fw_applicable(psp, desc))
3819 		return 0;
3820 
3821 	ucode_start_addr  = (uint8_t *)ta_hdr +
3822 			    le32_to_cpu(desc->offset_bytes) +
3823 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3824 
3825 	switch (desc->fw_type) {
3826 	case TA_FW_TYPE_PSP_ASD:
3827 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3828 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3829 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3830 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3831 		break;
3832 	case TA_FW_TYPE_PSP_XGMI:
3833 	case TA_FW_TYPE_PSP_XGMI_AUX:
3834 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3835 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3836 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3837 		break;
3838 	case TA_FW_TYPE_PSP_RAS:
3839 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3840 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3841 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3842 		break;
3843 	case TA_FW_TYPE_PSP_HDCP:
3844 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3845 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3846 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3847 		break;
3848 	case TA_FW_TYPE_PSP_DTM:
3849 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3850 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3851 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3852 		break;
3853 	case TA_FW_TYPE_PSP_RAP:
3854 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3855 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3856 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3857 		break;
3858 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3859 		psp->securedisplay_context.context.bin_desc.fw_version =
3860 			le32_to_cpu(desc->fw_version);
3861 		psp->securedisplay_context.context.bin_desc.size_bytes =
3862 			le32_to_cpu(desc->size_bytes);
3863 		psp->securedisplay_context.context.bin_desc.start_addr =
3864 			ucode_start_addr;
3865 		break;
3866 	default:
3867 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3868 		break;
3869 	}
3870 
3871 	return 0;
3872 }
3873 
3874 static int parse_ta_v1_microcode(struct psp_context *psp)
3875 {
3876 	const struct ta_firmware_header_v1_0 *ta_hdr;
3877 	struct amdgpu_device *adev = psp->adev;
3878 
3879 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3880 
3881 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3882 		return -EINVAL;
3883 
3884 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3885 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3886 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3887 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3888 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3889 		(uint8_t *)ta_hdr +
3890 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3891 
3892 	adev->psp.ras_context.context.bin_desc.fw_version =
3893 		le32_to_cpu(ta_hdr->ras.fw_version);
3894 	adev->psp.ras_context.context.bin_desc.size_bytes =
3895 		le32_to_cpu(ta_hdr->ras.size_bytes);
3896 	adev->psp.ras_context.context.bin_desc.start_addr =
3897 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3898 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3899 
3900 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3901 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3902 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3903 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3904 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3905 		(uint8_t *)ta_hdr +
3906 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3907 
3908 	adev->psp.dtm_context.context.bin_desc.fw_version =
3909 		le32_to_cpu(ta_hdr->dtm.fw_version);
3910 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3911 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3912 	adev->psp.dtm_context.context.bin_desc.start_addr =
3913 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3914 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3915 
3916 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3917 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3918 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3919 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3920 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3921 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3922 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3923 
3924 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3925 
3926 	return 0;
3927 }
3928 
3929 static int parse_ta_v2_microcode(struct psp_context *psp)
3930 {
3931 	const struct ta_firmware_header_v2_0 *ta_hdr;
3932 	struct amdgpu_device *adev = psp->adev;
3933 	int err = 0;
3934 	int ta_index = 0;
3935 
3936 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3937 
3938 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3939 		return -EINVAL;
3940 
3941 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3942 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3943 		return -EINVAL;
3944 	}
3945 
3946 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3947 		err = parse_ta_bin_descriptor(psp,
3948 					      &ta_hdr->ta_fw_bin[ta_index],
3949 					      ta_hdr);
3950 		if (err)
3951 			return err;
3952 	}
3953 
3954 	return 0;
3955 }
3956 
3957 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3958 {
3959 	const struct common_firmware_header *hdr;
3960 	struct amdgpu_device *adev = psp->adev;
3961 	int err;
3962 
3963 	if (amdgpu_is_kicker_fw(adev))
3964 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3965 					   "amdgpu/%s_ta_kicker.bin", chip_name);
3966 	else
3967 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3968 					   "amdgpu/%s_ta.bin", chip_name);
3969 	if (err)
3970 		return err;
3971 
3972 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3973 	switch (le16_to_cpu(hdr->header_version_major)) {
3974 	case 1:
3975 		err = parse_ta_v1_microcode(psp);
3976 		break;
3977 	case 2:
3978 		err = parse_ta_v2_microcode(psp);
3979 		break;
3980 	default:
3981 		dev_err(adev->dev, "unsupported TA header version\n");
3982 		err = -EINVAL;
3983 	}
3984 
3985 	if (err)
3986 		amdgpu_ucode_release(&adev->psp.ta_fw);
3987 
3988 	return err;
3989 }
3990 
3991 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3992 {
3993 	struct amdgpu_device *adev = psp->adev;
3994 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3995 	struct amdgpu_firmware_info *info = NULL;
3996 	int err = 0;
3997 
3998 	if (!amdgpu_sriov_vf(adev)) {
3999 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
4000 		return -EINVAL;
4001 	}
4002 
4003 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
4004 				   "amdgpu/%s_cap.bin", chip_name);
4005 	if (err) {
4006 		if (err == -ENODEV) {
4007 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
4008 			err = 0;
4009 		} else {
4010 			dev_err(adev->dev, "fail to initialize cap microcode\n");
4011 		}
4012 		goto out;
4013 	}
4014 
4015 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
4016 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
4017 	info->fw = adev->psp.cap_fw;
4018 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
4019 		adev->psp.cap_fw->data;
4020 	adev->firmware.fw_size += ALIGN(
4021 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
4022 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
4023 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
4024 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
4025 
4026 	return 0;
4027 
4028 out:
4029 	amdgpu_ucode_release(&adev->psp.cap_fw);
4030 	return err;
4031 }
4032 
4033 int psp_config_sq_perfmon(struct psp_context *psp,
4034 		uint32_t xcp_id, bool core_override_enable,
4035 		bool reg_override_enable, bool perfmon_override_enable)
4036 {
4037 	int ret;
4038 
4039 	if (amdgpu_sriov_vf(psp->adev))
4040 		return 0;
4041 
4042 	if (xcp_id > MAX_XCP) {
4043 		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
4044 		return -EINVAL;
4045 	}
4046 
4047 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
4048 		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
4049 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
4050 		return -EINVAL;
4051 	}
4052 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
4053 
4054 	cmd->cmd_id	=	GFX_CMD_ID_CONFIG_SQ_PERFMON;
4055 	cmd->cmd.config_sq_perfmon.gfx_xcp_mask	=	BIT_MASK(xcp_id);
4056 	cmd->cmd.config_sq_perfmon.core_override	=	core_override_enable;
4057 	cmd->cmd.config_sq_perfmon.reg_override	=	reg_override_enable;
4058 	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
4059 
4060 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
4061 	if (ret)
4062 		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
4063 			xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
4064 
4065 	release_psp_cmd_buf(psp);
4066 	return ret;
4067 }
4068 
4069 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
4070 					enum amd_clockgating_state state)
4071 {
4072 	return 0;
4073 }
4074 
4075 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
4076 				     enum amd_powergating_state state)
4077 {
4078 	return 0;
4079 }
4080 
4081 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
4082 					 struct device_attribute *attr,
4083 					 char *buf)
4084 {
4085 	struct drm_device *ddev = dev_get_drvdata(dev);
4086 	struct amdgpu_device *adev = drm_to_adev(ddev);
4087 	struct amdgpu_ip_block *ip_block;
4088 	uint32_t fw_ver;
4089 	int ret;
4090 
4091 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4092 	if (!ip_block || !ip_block->status.late_initialized) {
4093 		dev_info(adev->dev, "PSP block is not ready yet\n.");
4094 		return -EBUSY;
4095 	}
4096 
4097 	mutex_lock(&adev->psp.mutex);
4098 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
4099 	mutex_unlock(&adev->psp.mutex);
4100 
4101 	if (ret) {
4102 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
4103 		return ret;
4104 	}
4105 
4106 	return sysfs_emit(buf, "%x\n", fw_ver);
4107 }
4108 
4109 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
4110 						       struct device_attribute *attr,
4111 						       const char *buf,
4112 						       size_t count)
4113 {
4114 	struct drm_device *ddev = dev_get_drvdata(dev);
4115 	struct amdgpu_device *adev = drm_to_adev(ddev);
4116 	int ret, idx;
4117 	const struct firmware *usbc_pd_fw;
4118 	struct amdgpu_bo *fw_buf_bo = NULL;
4119 	uint64_t fw_pri_mc_addr;
4120 	void *fw_pri_cpu_addr;
4121 	struct amdgpu_ip_block *ip_block;
4122 
4123 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4124 	if (!ip_block || !ip_block->status.late_initialized) {
4125 		dev_err(adev->dev, "PSP block is not ready yet.");
4126 		return -EBUSY;
4127 	}
4128 
4129 	if (!drm_dev_enter(ddev, &idx))
4130 		return -ENODEV;
4131 
4132 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
4133 				   "amdgpu/%s", buf);
4134 	if (ret)
4135 		goto fail;
4136 
4137 	/* LFB address which is aligned to 1MB boundary per PSP request */
4138 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
4139 				      AMDGPU_GEM_DOMAIN_VRAM |
4140 				      AMDGPU_GEM_DOMAIN_GTT,
4141 				      &fw_buf_bo, &fw_pri_mc_addr,
4142 				      &fw_pri_cpu_addr);
4143 	if (ret)
4144 		goto rel_buf;
4145 
4146 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
4147 
4148 	mutex_lock(&adev->psp.mutex);
4149 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
4150 	mutex_unlock(&adev->psp.mutex);
4151 
4152 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4153 
4154 rel_buf:
4155 	amdgpu_ucode_release(&usbc_pd_fw);
4156 fail:
4157 	if (ret) {
4158 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
4159 		count = ret;
4160 	}
4161 
4162 	drm_dev_exit(idx);
4163 	return count;
4164 }
4165 
4166 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
4167 {
4168 	int idx;
4169 
4170 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
4171 		return;
4172 
4173 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
4174 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
4175 
4176 	drm_dev_exit(idx);
4177 }
4178 
4179 /**
4180  * DOC: usbc_pd_fw
4181  * Reading from this file will retrieve the USB-C PD firmware version. Writing to
4182  * this file will trigger the update process.
4183  */
4184 static DEVICE_ATTR(usbc_pd_fw, 0644,
4185 		   psp_usbc_pd_fw_sysfs_read,
4186 		   psp_usbc_pd_fw_sysfs_write);
4187 
4188 int is_psp_fw_valid(struct psp_bin_desc bin)
4189 {
4190 	return bin.size_bytes;
4191 }
4192 
4193 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4194 					const struct bin_attribute *bin_attr,
4195 					char *buffer, loff_t pos, size_t count)
4196 {
4197 	struct device *dev = kobj_to_dev(kobj);
4198 	struct drm_device *ddev = dev_get_drvdata(dev);
4199 	struct amdgpu_device *adev = drm_to_adev(ddev);
4200 
4201 	adev->psp.vbflash_done = false;
4202 
4203 	/* Safeguard against memory drain */
4204 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
4205 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4206 		kvfree(adev->psp.vbflash_tmp_buf);
4207 		adev->psp.vbflash_tmp_buf = NULL;
4208 		adev->psp.vbflash_image_size = 0;
4209 		return -ENOMEM;
4210 	}
4211 
4212 	/* TODO Just allocate max for now and optimize to realloc later if needed */
4213 	if (!adev->psp.vbflash_tmp_buf) {
4214 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4215 		if (!adev->psp.vbflash_tmp_buf)
4216 			return -ENOMEM;
4217 	}
4218 
4219 	mutex_lock(&adev->psp.mutex);
4220 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4221 	adev->psp.vbflash_image_size += count;
4222 	mutex_unlock(&adev->psp.mutex);
4223 
4224 	dev_dbg(adev->dev, "IFWI staged for update\n");
4225 
4226 	return count;
4227 }
4228 
4229 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4230 				       const struct bin_attribute *bin_attr, char *buffer,
4231 				       loff_t pos, size_t count)
4232 {
4233 	struct device *dev = kobj_to_dev(kobj);
4234 	struct drm_device *ddev = dev_get_drvdata(dev);
4235 	struct amdgpu_device *adev = drm_to_adev(ddev);
4236 	struct amdgpu_bo *fw_buf_bo = NULL;
4237 	uint64_t fw_pri_mc_addr;
4238 	void *fw_pri_cpu_addr;
4239 	int ret;
4240 
4241 	if (adev->psp.vbflash_image_size == 0)
4242 		return -EINVAL;
4243 
4244 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4245 
4246 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4247 					AMDGPU_GPU_PAGE_SIZE,
4248 					AMDGPU_GEM_DOMAIN_VRAM,
4249 					&fw_buf_bo,
4250 					&fw_pri_mc_addr,
4251 					&fw_pri_cpu_addr);
4252 	if (ret)
4253 		goto rel_buf;
4254 
4255 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4256 
4257 	mutex_lock(&adev->psp.mutex);
4258 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4259 	mutex_unlock(&adev->psp.mutex);
4260 
4261 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4262 
4263 rel_buf:
4264 	kvfree(adev->psp.vbflash_tmp_buf);
4265 	adev->psp.vbflash_tmp_buf = NULL;
4266 	adev->psp.vbflash_image_size = 0;
4267 
4268 	if (ret) {
4269 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4270 		return ret;
4271 	}
4272 
4273 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4274 	return 0;
4275 }
4276 
4277 /**
4278  * DOC: psp_vbflash
4279  * Writing to this file will stage an IFWI for update. Reading from this file
4280  * will trigger the update process.
4281  */
4282 static const struct bin_attribute psp_vbflash_bin_attr = {
4283 	.attr = {.name = "psp_vbflash", .mode = 0660},
4284 	.size = 0,
4285 	.write = amdgpu_psp_vbflash_write,
4286 	.read = amdgpu_psp_vbflash_read,
4287 };
4288 
4289 /**
4290  * DOC: psp_vbflash_status
4291  * The status of the flash process.
4292  * 0: IFWI flash not complete.
4293  * 1: IFWI flash complete.
4294  */
4295 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4296 					 struct device_attribute *attr,
4297 					 char *buf)
4298 {
4299 	struct drm_device *ddev = dev_get_drvdata(dev);
4300 	struct amdgpu_device *adev = drm_to_adev(ddev);
4301 	uint32_t vbflash_status;
4302 
4303 	vbflash_status = psp_vbflash_status(&adev->psp);
4304 	if (!adev->psp.vbflash_done)
4305 		vbflash_status = 0;
4306 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4307 		vbflash_status = 1;
4308 
4309 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
4310 }
4311 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4312 
4313 static const struct bin_attribute *const bin_flash_attrs[] = {
4314 	&psp_vbflash_bin_attr,
4315 	NULL
4316 };
4317 
4318 static struct attribute *flash_attrs[] = {
4319 	&dev_attr_psp_vbflash_status.attr,
4320 	&dev_attr_usbc_pd_fw.attr,
4321 	NULL
4322 };
4323 
4324 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4325 {
4326 	struct device *dev = kobj_to_dev(kobj);
4327 	struct drm_device *ddev = dev_get_drvdata(dev);
4328 	struct amdgpu_device *adev = drm_to_adev(ddev);
4329 
4330 	if (attr == &dev_attr_usbc_pd_fw.attr)
4331 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
4332 
4333 	return adev->psp.sup_ifwi_up ? 0440 : 0;
4334 }
4335 
4336 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4337 						const struct bin_attribute *attr,
4338 						int idx)
4339 {
4340 	struct device *dev = kobj_to_dev(kobj);
4341 	struct drm_device *ddev = dev_get_drvdata(dev);
4342 	struct amdgpu_device *adev = drm_to_adev(ddev);
4343 
4344 	return adev->psp.sup_ifwi_up ? 0660 : 0;
4345 }
4346 
4347 const struct attribute_group amdgpu_flash_attr_group = {
4348 	.attrs = flash_attrs,
4349 	.bin_attrs = bin_flash_attrs,
4350 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4351 	.is_visible = amdgpu_flash_attr_is_visible,
4352 };
4353 
4354 #if defined(CONFIG_DEBUG_FS)
4355 static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
4356 {
4357 	struct amdgpu_device *adev = filp->f_inode->i_private;
4358 	struct spirom_bo *bo_triplet;
4359 	int ret;
4360 
4361 	/* serialize the open() file calling */
4362 	if (!mutex_trylock(&adev->psp.mutex))
4363 		return -EBUSY;
4364 
4365 	/*
4366 	 * make sure only one userpace process is alive for dumping so that
4367 	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE * 2 is consumed.
4368 	 * let's say the case where one process try opening the file while
4369 	 * another one has proceeded to read or release. In this way, eliminate
4370 	 * the use of mutex for read() or release() callback as well.
4371 	 */
4372 	if (adev->psp.spirom_dump_trip) {
4373 		mutex_unlock(&adev->psp.mutex);
4374 		return -EBUSY;
4375 	}
4376 
4377 	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
4378 	if (!bo_triplet) {
4379 		mutex_unlock(&adev->psp.mutex);
4380 		return -ENOMEM;
4381 	}
4382 
4383 	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
4384 				      AMDGPU_GPU_PAGE_SIZE,
4385 				      AMDGPU_GEM_DOMAIN_GTT,
4386 				      &bo_triplet->bo,
4387 				      &bo_triplet->mc_addr,
4388 				      &bo_triplet->cpu_addr);
4389 	if (ret)
4390 		goto rel_trip;
4391 
4392 	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
4393 	if (ret)
4394 		goto rel_bo;
4395 
4396 	adev->psp.spirom_dump_trip = bo_triplet;
4397 	mutex_unlock(&adev->psp.mutex);
4398 	return 0;
4399 rel_bo:
4400 	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4401 			      &bo_triplet->cpu_addr);
4402 rel_trip:
4403 	kfree(bo_triplet);
4404 	mutex_unlock(&adev->psp.mutex);
4405 	dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
4406 	return ret;
4407 }
4408 
4409 static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
4410 					    loff_t *pos)
4411 {
4412 	struct amdgpu_device *adev = filp->f_inode->i_private;
4413 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4414 
4415 	if (!bo_triplet)
4416 		return -EINVAL;
4417 
4418 	return simple_read_from_buffer(buf,
4419 				       size,
4420 				       pos, bo_triplet->cpu_addr,
4421 				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4422 }
4423 
4424 static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
4425 {
4426 	struct amdgpu_device *adev = filp->f_inode->i_private;
4427 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4428 
4429 	if (bo_triplet) {
4430 		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4431 				      &bo_triplet->cpu_addr);
4432 		kfree(bo_triplet);
4433 	}
4434 
4435 	adev->psp.spirom_dump_trip = NULL;
4436 	return 0;
4437 }
4438 
4439 static const struct file_operations psp_dump_spirom_debugfs_ops = {
4440 	.owner = THIS_MODULE,
4441 	.open = psp_read_spirom_debugfs_open,
4442 	.read = psp_read_spirom_debugfs_read,
4443 	.release = psp_read_spirom_debugfs_release,
4444 	.llseek = default_llseek,
4445 };
4446 #endif
4447 
4448 void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
4449 {
4450 #if defined(CONFIG_DEBUG_FS)
4451 	struct drm_minor *minor = adev_to_drm(adev)->primary;
4452 
4453 	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
4454 				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4455 #endif
4456 }
4457 
4458 const struct amd_ip_funcs psp_ip_funcs = {
4459 	.name = "psp",
4460 	.early_init = psp_early_init,
4461 	.sw_init = psp_sw_init,
4462 	.sw_fini = psp_sw_fini,
4463 	.hw_init = psp_hw_init,
4464 	.hw_fini = psp_hw_fini,
4465 	.suspend = psp_suspend,
4466 	.resume = psp_resume,
4467 	.set_clockgating_state = psp_set_clockgating_state,
4468 	.set_powergating_state = psp_set_powergating_state,
4469 };
4470 
4471 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4472 	.type = AMD_IP_BLOCK_TYPE_PSP,
4473 	.major = 3,
4474 	.minor = 1,
4475 	.rev = 0,
4476 	.funcs = &psp_ip_funcs,
4477 };
4478 
4479 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4480 	.type = AMD_IP_BLOCK_TYPE_PSP,
4481 	.major = 10,
4482 	.minor = 0,
4483 	.rev = 0,
4484 	.funcs = &psp_ip_funcs,
4485 };
4486 
4487 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4488 	.type = AMD_IP_BLOCK_TYPE_PSP,
4489 	.major = 11,
4490 	.minor = 0,
4491 	.rev = 0,
4492 	.funcs = &psp_ip_funcs,
4493 };
4494 
4495 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4496 	.type = AMD_IP_BLOCK_TYPE_PSP,
4497 	.major = 11,
4498 	.minor = 0,
4499 	.rev = 8,
4500 	.funcs = &psp_ip_funcs,
4501 };
4502 
4503 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4504 	.type = AMD_IP_BLOCK_TYPE_PSP,
4505 	.major = 12,
4506 	.minor = 0,
4507 	.rev = 0,
4508 	.funcs = &psp_ip_funcs,
4509 };
4510 
4511 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4512 	.type = AMD_IP_BLOCK_TYPE_PSP,
4513 	.major = 13,
4514 	.minor = 0,
4515 	.rev = 0,
4516 	.funcs = &psp_ip_funcs,
4517 };
4518 
4519 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4520 	.type = AMD_IP_BLOCK_TYPE_PSP,
4521 	.major = 13,
4522 	.minor = 0,
4523 	.rev = 4,
4524 	.funcs = &psp_ip_funcs,
4525 };
4526 
4527 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4528 	.type = AMD_IP_BLOCK_TYPE_PSP,
4529 	.major = 14,
4530 	.minor = 0,
4531 	.rev = 0,
4532 	.funcs = &psp_ip_funcs,
4533 };
4534