xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision 01479d140686430ce51d01dc4ad4548323bd1232)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46 
47 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)
48 
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52 
53 static int psp_ring_init(struct psp_context *psp,
54 			 enum psp_ring_type ring_type)
55 {
56 	int ret = 0;
57 	struct psp_ring *ring;
58 	struct amdgpu_device *adev = psp->adev;
59 
60 	ring = &psp->km_ring;
61 
62 	ring->ring_type = ring_type;
63 
64 	/* allocate a 4K page of local frame buffer memory for the ring */
65 	ring->ring_size = 0x1000;
66 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 				      AMDGPU_GEM_DOMAIN_VRAM |
68 				      AMDGPU_GEM_DOMAIN_GTT,
69 				      &adev->firmware.rbuf,
70 				      &ring->ring_mem_mc_addr,
71 				      (void **)&ring->ring_mem);
72 	if (ret) {
73 		ring->ring_size = 0;
74 		return ret;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * Due to DF Cstate management being centralized in the PMFW, the
82  * firmware loading sequence is updated as below:
83  *   - Load KDB
84  *   - Load SYS_DRV
85  *   - Load tOS
86  *   - Load PMFW
87  *   - Setup TMR
88  *   - Load other non-psp fw
89  *   - Load ASD
90  *   - Load XGMI/RAS/HDCP/DTM TA if any
91  *
92  * This new sequence is required for
93  *   - Arcturus and onwards
94  */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 	struct amdgpu_device *adev = psp->adev;
98 
99 	if (amdgpu_sriov_vf(adev)) {
100 		psp->pmfw_centralized_cstate_management = false;
101 		return;
102 	}
103 
104 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 	case IP_VERSION(11, 0, 0):
106 	case IP_VERSION(11, 0, 4):
107 	case IP_VERSION(11, 0, 5):
108 	case IP_VERSION(11, 0, 7):
109 	case IP_VERSION(11, 0, 9):
110 	case IP_VERSION(11, 0, 11):
111 	case IP_VERSION(11, 0, 12):
112 	case IP_VERSION(11, 0, 13):
113 	case IP_VERSION(13, 0, 0):
114 	case IP_VERSION(13, 0, 2):
115 	case IP_VERSION(13, 0, 7):
116 		psp->pmfw_centralized_cstate_management = true;
117 		break;
118 	default:
119 		psp->pmfw_centralized_cstate_management = false;
120 		break;
121 	}
122 }
123 
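/*
 * A minimal, hypothetical sketch (not the driver's actual hw_start path) of
 * the centralized-cstate firmware loading order described above, expressed
 * as an ordered step table; the enum and its names are illustrative only.
 */
enum psp_example_load_step {
	PSP_EXAMPLE_STEP_KDB,
	PSP_EXAMPLE_STEP_SYS_DRV,
	PSP_EXAMPLE_STEP_TOS,
	PSP_EXAMPLE_STEP_PMFW,
	PSP_EXAMPLE_STEP_TMR,
	PSP_EXAMPLE_STEP_NON_PSP_FW,
	PSP_EXAMPLE_STEP_ASD,
	PSP_EXAMPLE_STEP_TA,
};
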
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 	struct amdgpu_device *adev = psp->adev;
127 	char ucode_prefix[30];
128 	int ret = 0;
129 
130 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131 
132 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 	case IP_VERSION(9, 0, 0):
134 	case IP_VERSION(11, 0, 7):
135 	case IP_VERSION(11, 0, 9):
136 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 		break;
139 	case IP_VERSION(13, 0, 2):
140 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 		ret = psp_init_cap_microcode(psp, ucode_prefix);
142 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 		break;
144 	case IP_VERSION(13, 0, 0):
145 		adev->virt.autoload_ucode_id = 0;
146 		break;
147 	case IP_VERSION(13, 0, 6):
148 	case IP_VERSION(13, 0, 14):
149 		ret = psp_init_cap_microcode(psp, ucode_prefix);
150 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 		break;
152 	case IP_VERSION(13, 0, 10):
153 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 		ret = psp_init_cap_microcode(psp, ucode_prefix);
155 		break;
156 	case IP_VERSION(13, 0, 12):
157 		ret = psp_init_ta_microcode(psp, ucode_prefix);
158 		break;
159 	default:
160 		return -EINVAL;
161 	}
162 	return ret;
163 }
164 
165 static int psp_early_init(struct amdgpu_ip_block *ip_block)
166 {
167 	struct amdgpu_device *adev = ip_block->adev;
168 	struct psp_context *psp = &adev->psp;
169 
170 	psp->autoload_supported = true;
171 	psp->boot_time_tmr = true;
172 
173 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
174 	case IP_VERSION(9, 0, 0):
175 		psp_v3_1_set_psp_funcs(psp);
176 		psp->autoload_supported = false;
177 		psp->boot_time_tmr = false;
178 		break;
179 	case IP_VERSION(10, 0, 0):
180 	case IP_VERSION(10, 0, 1):
181 		psp_v10_0_set_psp_funcs(psp);
182 		psp->autoload_supported = false;
183 		psp->boot_time_tmr = false;
184 		break;
185 	case IP_VERSION(11, 0, 2):
186 	case IP_VERSION(11, 0, 4):
187 		psp_v11_0_set_psp_funcs(psp);
188 		psp->autoload_supported = false;
189 		psp->boot_time_tmr = false;
190 		break;
191 	case IP_VERSION(11, 0, 0):
192 	case IP_VERSION(11, 0, 7):
193 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
194 		fallthrough;
195 	case IP_VERSION(11, 0, 5):
196 	case IP_VERSION(11, 0, 9):
197 	case IP_VERSION(11, 0, 11):
198 	case IP_VERSION(11, 5, 0):
199 	case IP_VERSION(11, 5, 2):
200 	case IP_VERSION(11, 0, 12):
201 	case IP_VERSION(11, 0, 13):
202 		psp_v11_0_set_psp_funcs(psp);
203 		psp->boot_time_tmr = false;
204 		break;
205 	case IP_VERSION(11, 0, 3):
206 	case IP_VERSION(12, 0, 1):
207 		psp_v12_0_set_psp_funcs(psp);
208 		psp->autoload_supported = false;
209 		psp->boot_time_tmr = false;
210 		break;
211 	case IP_VERSION(13, 0, 2):
212 		psp->boot_time_tmr = false;
213 		fallthrough;
214 	case IP_VERSION(13, 0, 6):
215 	case IP_VERSION(13, 0, 14):
216 		psp_v13_0_set_psp_funcs(psp);
217 		psp->autoload_supported = false;
218 		break;
219 	case IP_VERSION(13, 0, 12):
220 		psp_v13_0_set_psp_funcs(psp);
221 		psp->autoload_supported = false;
222 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
223 		break;
224 	case IP_VERSION(13, 0, 1):
225 	case IP_VERSION(13, 0, 3):
226 	case IP_VERSION(13, 0, 5):
227 	case IP_VERSION(13, 0, 8):
228 	case IP_VERSION(13, 0, 11):
229 	case IP_VERSION(14, 0, 0):
230 	case IP_VERSION(14, 0, 1):
231 	case IP_VERSION(14, 0, 4):
232 		psp_v13_0_set_psp_funcs(psp);
233 		psp->boot_time_tmr = false;
234 		break;
235 	case IP_VERSION(11, 0, 8):
236 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
237 			psp_v11_0_8_set_psp_funcs(psp);
238 		}
239 		psp->autoload_supported = false;
240 		psp->boot_time_tmr = false;
241 		break;
242 	case IP_VERSION(13, 0, 0):
243 	case IP_VERSION(13, 0, 7):
244 	case IP_VERSION(13, 0, 10):
245 		psp_v13_0_set_psp_funcs(psp);
246 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
247 		psp->boot_time_tmr = false;
248 		break;
249 	case IP_VERSION(13, 0, 4):
250 		psp_v13_0_4_set_psp_funcs(psp);
251 		psp->boot_time_tmr = false;
252 		break;
253 	case IP_VERSION(14, 0, 2):
254 	case IP_VERSION(14, 0, 3):
255 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
256 		psp_v14_0_set_psp_funcs(psp);
257 		break;
258 	case IP_VERSION(14, 0, 5):
259 		psp_v14_0_set_psp_funcs(psp);
260 		psp->boot_time_tmr = false;
261 		break;
262 	default:
263 		return -EINVAL;
264 	}
265 
266 	psp->adev = adev;
267 
268 	adev->psp_timeout = 20000;
269 
270 	psp_check_pmfw_centralized_cstate_management(psp);
271 
272 	if (amdgpu_sriov_vf(adev))
273 		return psp_init_sriov_microcode(psp);
274 	else
275 		return psp_init_microcode(psp);
276 }
277 
278 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
279 {
280 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
281 			      &mem_ctx->shared_buf);
282 	mem_ctx->shared_bo = NULL;
283 }
284 
285 static void psp_free_shared_bufs(struct psp_context *psp)
286 {
287 	void *tmr_buf;
288 	void **pptr;
289 
290 	/* free TMR memory buffer */
291 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
292 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
293 	psp->tmr_bo = NULL;
294 
295 	/* free xgmi shared memory */
296 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
297 
298 	/* free ras shared memory */
299 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
300 
301 	/* free hdcp shared memory */
302 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
303 
304 	/* free dtm shared memory */
305 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
306 
307 	/* free rap shared memory */
308 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
309 
310 	/* free securedisplay shared memory */
311 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
314 }
315 
316 static void psp_memory_training_fini(struct psp_context *psp)
317 {
318 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
319 
320 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
321 	kfree(ctx->sys_cache);
322 	ctx->sys_cache = NULL;
323 }
324 
325 static int psp_memory_training_init(struct psp_context *psp)
326 {
327 	int ret;
328 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
329 
330 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
331 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
332 		return 0;
333 	}
334 
335 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
336 	if (ctx->sys_cache == NULL) {
337 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
338 		ret = -ENOMEM;
339 		goto Err_out;
340 	}
341 
342 	dev_dbg(psp->adev->dev,
343 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
344 		ctx->train_data_size,
345 		ctx->p2c_train_data_offset,
346 		ctx->c2p_train_data_offset);
347 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
348 	return 0;
349 
350 Err_out:
351 	psp_memory_training_fini(psp);
352 	return ret;
353 }
354 
355 /*
356  * Helper function to query psp runtime database entry
357  *
358  * @adev: amdgpu_device pointer
359  * @entry_type: the type of psp runtime database entry
360  * @db_entry: runtime database entry pointer
361  *
362  * Return false if the runtime database doesn't exist or the entry is invalid,
363  * or true if the specific database entry is found and copied to @db_entry
364  */
365 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
366 				     enum psp_runtime_entry_type entry_type,
367 				     void *db_entry)
368 {
369 	uint64_t db_header_pos, db_dir_pos;
370 	struct psp_runtime_data_header db_header = {0};
371 	struct psp_runtime_data_directory db_dir = {0};
372 	bool ret = false;
373 	int i;
374 
375 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
376 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
377 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
378 		return false;
379 
380 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
381 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
382 
383 	/* read runtime db header from vram */
384 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
385 			sizeof(struct psp_runtime_data_header), false);
386 
387 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
388 		/* runtime db doesn't exist, exit */
389 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
390 		return false;
391 	}
392 
393 	/* read runtime database entry from vram */
394 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
395 			sizeof(struct psp_runtime_data_directory), false);
396 
397 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
398 		/* invalid db entry count, exit */
399 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
400 		return false;
401 	}
402 
403 	/* look up the requested entry type */
404 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
405 		if (db_dir.entry_list[i].entry_type == entry_type) {
406 			switch (entry_type) {
407 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
408 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
409 					/* invalid db entry size */
410 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
411 					return false;
412 				}
413 				/* read runtime database entry */
414 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
415 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
416 				ret = true;
417 				break;
418 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
419 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
420 					/* invalid db entry size */
421 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
422 					return false;
423 				}
424 				/* read runtime database entry */
425 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
426 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
427 				ret = true;
428 				break;
429 			default:
430 				ret = false;
431 				break;
432 			}
433 		}
434 	}
435 
436 	return ret;
437 }
438 
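/*
 * Address layout assumed by psp_get_runtime_db_entry() above: the runtime
 * database header sits at a fixed offset below the top of VRAM, the directory
 * follows the header, and each entry lives at the header position plus the
 * directory-provided offset.  A small illustrative helper (hypothetical, not
 * used by the driver) for that arithmetic:
 */
static inline uint64_t psp_example_db_entry_pos(uint64_t vram_size,
						uint64_t db_offset,
						uint64_t entry_offset)
{
	uint64_t db_header_pos = vram_size - db_offset;

	return db_header_pos + entry_offset;
}
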
439 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
440 {
441 	struct amdgpu_device *adev = ip_block->adev;
442 	struct psp_context *psp = &adev->psp;
443 	int ret;
444 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
445 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
446 	struct psp_runtime_scpm_entry scpm_entry;
447 
448 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
449 	if (!psp->cmd) {
450 		dev_err(adev->dev, "Failed to allocate memory for command buffer!\n");
451 		return -ENOMEM;
452 	}
453 
454 	adev->psp.xgmi_context.supports_extended_data =
455 		!adev->gmc.xgmi.connected_to_cpu &&
456 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
457 
458 	memset(&scpm_entry, 0, sizeof(scpm_entry));
459 	if ((psp_get_runtime_db_entry(adev,
460 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
461 				&scpm_entry)) &&
462 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
463 		adev->scpm_enabled = true;
464 		adev->scpm_status = scpm_entry.scpm_status;
465 	} else {
466 		adev->scpm_enabled = false;
467 		adev->scpm_status = SCPM_DISABLE;
468 	}
469 
470 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
471 
472 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
473 	if (psp_get_runtime_db_entry(adev,
474 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
475 				&boot_cfg_entry)) {
476 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
477 		if ((psp->boot_cfg_bitmask) &
478 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
479 			/* If psp runtime database exists, then
480 			 * only enable two stage memory training
481 			 * when TWO_STAGE_DRAM_TRAINING bit is set
482 			 * in runtime database
483 			 */
484 			mem_training_ctx->enable_mem_training = true;
485 		}
486 
487 	} else {
488 		/* If psp runtime database doesn't exist or is
489 		 * invalid, force enable two stage memory training
490 		 */
491 		mem_training_ctx->enable_mem_training = true;
492 	}
493 
494 	if (mem_training_ctx->enable_mem_training) {
495 		ret = psp_memory_training_init(psp);
496 		if (ret) {
497 			dev_err(adev->dev, "Failed to initialize memory training!\n");
498 			return ret;
499 		}
500 
501 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
502 		if (ret) {
503 			dev_err(adev->dev, "Failed to process memory training!\n");
504 			return ret;
505 		}
506 	}
507 
508 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
509 				      AMDGPU_GEM_DOMAIN_VRAM,
510 				      &psp->fw_pri_bo,
511 				      &psp->fw_pri_mc_addr,
512 				      &psp->fw_pri_buf);
513 	if (ret)
514 		return ret;
515 
516 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
517 				      AMDGPU_GEM_DOMAIN_VRAM |
518 				      AMDGPU_GEM_DOMAIN_GTT,
519 				      &psp->fence_buf_bo,
520 				      &psp->fence_buf_mc_addr,
521 				      &psp->fence_buf);
522 	if (ret)
523 		goto failed1;
524 
525 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
526 				      AMDGPU_GEM_DOMAIN_VRAM |
527 				      AMDGPU_GEM_DOMAIN_GTT,
528 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
529 				      (void **)&psp->cmd_buf_mem);
530 	if (ret)
531 		goto failed2;
532 
533 	return 0;
534 
535 failed2:
536 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
537 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
538 failed1:
539 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
540 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
541 	return ret;
542 }
543 
544 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
545 {
546 	struct amdgpu_device *adev = ip_block->adev;
547 	struct psp_context *psp = &adev->psp;
548 
549 	psp_memory_training_fini(psp);
550 
551 	amdgpu_ucode_release(&psp->sos_fw);
552 	amdgpu_ucode_release(&psp->asd_fw);
553 	amdgpu_ucode_release(&psp->ta_fw);
554 	amdgpu_ucode_release(&psp->cap_fw);
555 	amdgpu_ucode_release(&psp->toc_fw);
556 
557 	kfree(psp->cmd);
558 	psp->cmd = NULL;
559 
560 	psp_free_shared_bufs(psp);
561 
562 	if (psp->km_ring.ring_mem)
563 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
564 				      &psp->km_ring.ring_mem_mc_addr,
565 				      (void **)&psp->km_ring.ring_mem);
566 
567 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
568 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
569 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
570 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
571 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
572 			      (void **)&psp->cmd_buf_mem);
573 
574 	return 0;
575 }
576 
577 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
578 		 uint32_t mask, uint32_t flags)
579 {
580 	bool check_changed = flags & PSP_WAITREG_CHANGED;
581 	bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
582 	uint32_t val;
583 	int i;
584 	struct amdgpu_device *adev = psp->adev;
585 
586 	if (psp->adev->no_hw_access)
587 		return 0;
588 
589 	for (i = 0; i < adev->usec_timeout; i++) {
590 		val = RREG32(reg_index);
591 		if (check_changed) {
592 			if (val != reg_val)
593 				return 0;
594 		} else {
595 			if ((val & mask) == reg_val)
596 				return 0;
597 		}
598 		udelay(1);
599 	}
600 
601 	if (verbose)
602 		dev_err(adev->dev,
603 			"psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
604 			reg_index, mask, val, reg_val);
605 
606 	return -ETIME;
607 }
608 
609 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
610 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
611 {
612 	uint32_t val;
613 	int i;
614 	struct amdgpu_device *adev = psp->adev;
615 
616 	if (psp->adev->no_hw_access)
617 		return 0;
618 
619 	for (i = 0; i < msec_timeout; i++) {
620 		val = RREG32(reg_index);
621 		if ((val & mask) == reg_val)
622 			return 0;
623 		msleep(1);
624 	}
625 
626 	return -ETIME;
627 }
628 
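/*
 * Illustrative helper only (hypothetical, not wired into the driver): the two
 * wait modes used by psp_wait_for() above reduce to these per-iteration
 * predicates, evaluated against the latest register read.
 */
static inline bool psp_example_wait_done(uint32_t val, uint32_t reg_val,
					 uint32_t mask, bool check_changed)
{
	/* PSP_WAITREG_CHANGED: done as soon as the value differs */
	if (check_changed)
		return val != reg_val;

	/* default: done when the masked value equals the expected value */
	return (val & mask) == reg_val;
}
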
629 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
630 {
631 	switch (cmd_id) {
632 	case GFX_CMD_ID_LOAD_TA:
633 		return "LOAD_TA";
634 	case GFX_CMD_ID_UNLOAD_TA:
635 		return "UNLOAD_TA";
636 	case GFX_CMD_ID_INVOKE_CMD:
637 		return "INVOKE_CMD";
638 	case GFX_CMD_ID_LOAD_ASD:
639 		return "LOAD_ASD";
640 	case GFX_CMD_ID_SETUP_TMR:
641 		return "SETUP_TMR";
642 	case GFX_CMD_ID_LOAD_IP_FW:
643 		return "LOAD_IP_FW";
644 	case GFX_CMD_ID_DESTROY_TMR:
645 		return "DESTROY_TMR";
646 	case GFX_CMD_ID_SAVE_RESTORE:
647 		return "SAVE_RESTORE_IP_FW";
648 	case GFX_CMD_ID_SETUP_VMR:
649 		return "SETUP_VMR";
650 	case GFX_CMD_ID_DESTROY_VMR:
651 		return "DESTROY_VMR";
652 	case GFX_CMD_ID_PROG_REG:
653 		return "PROG_REG";
654 	case GFX_CMD_ID_GET_FW_ATTESTATION:
655 		return "GET_FW_ATTESTATION";
656 	case GFX_CMD_ID_LOAD_TOC:
657 		return "ID_LOAD_TOC";
658 	case GFX_CMD_ID_AUTOLOAD_RLC:
659 		return "AUTOLOAD_RLC";
660 	case GFX_CMD_ID_BOOT_CFG:
661 		return "BOOT_CFG";
662 	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
663 		return "CONFIG_SQ_PERFMON";
664 	case GFX_CMD_ID_FB_FW_RESERV_ADDR:
665 		return "FB_FW_RESERV_ADDR";
666 	case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
667 		return "FB_FW_RESERV_EXT_ADDR";
668 	case GFX_CMD_ID_SRIOV_SPATIAL_PART:
669 		return "SPATIAL_PARTITION";
670 	case GFX_CMD_ID_FB_NPS_MODE:
671 		return "NPS_MODE_CHANGE";
672 	default:
673 		return "UNKNOWN CMD";
674 	}
675 }
676 
677 static bool psp_err_warn(struct psp_context *psp)
678 {
679 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
680 
681 	/* This response indicates reg list is already loaded */
682 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
683 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
684 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
685 	    cmd->resp.status == TEE_ERROR_CANCEL)
686 		return false;
687 
688 	return true;
689 }
690 
691 static int
692 psp_cmd_submit_buf(struct psp_context *psp,
693 		   struct amdgpu_firmware_info *ucode,
694 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
695 {
696 	int ret;
697 	int index;
698 	int timeout = psp->adev->psp_timeout;
699 	bool ras_intr = false;
700 	bool skip_unsupport = false;
701 
702 	if (psp->adev->no_hw_access)
703 		return 0;
704 
705 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
706 
707 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
708 
709 	index = atomic_inc_return(&psp->fence_value);
710 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
711 	if (ret) {
712 		atomic_dec(&psp->fence_value);
713 		goto exit;
714 	}
715 
716 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
717 	while (*((unsigned int *)psp->fence_buf) != index) {
718 		if (--timeout == 0)
719 			break;
720 		/*
721 		 * Don't wait for the timeout when err_event_athub occurs, because
722 		 * the gpu reset thread has been triggered and the held resources
723 		 * must be released for the psp resume sequence.
724 		 */
725 		ras_intr = amdgpu_ras_intr_triggered();
726 		if (ras_intr)
727 			break;
728 		usleep_range(10, 100);
729 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
730 	}
731 
732 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
733 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
734 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
735 
736 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
737 
738 	/* In some cases, the psp response status is not 0 even though there
739 	 * was no problem while the command was submitted. Some versions of
740 	 * the PSP FW don't write 0 to that field.
741 	 * So only print a warning instead of an error during psp
742 	 * initialization to avoid breaking hw_init, and don't return
743 	 * -EINVAL here.
744 	 */
745 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
746 		if (ucode)
747 			dev_warn(psp->adev->dev,
748 				 "failed to load ucode %s(0x%X) ",
749 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
750 		if (psp_err_warn(psp))
751 			dev_warn(
752 				psp->adev->dev,
753 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
754 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
755 				psp->cmd_buf_mem->cmd_id,
756 				psp->cmd_buf_mem->resp.status);
757 		/* If any firmware (including CAP) load fails under SRIOV, it should
758 		 * return failure to stop the VF from initializing.
759 		 * Also return failure in case of timeout
760 		 */
761 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
762 			ret = -EINVAL;
763 			goto exit;
764 		}
765 	}
766 
767 	if (ucode) {
768 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
769 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
770 	}
771 
772 exit:
773 	return ret;
774 }
775 
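/*
 * Simplified sketch of the fence handshake performed by psp_cmd_submit_buf()
 * above: the driver picks a fence index, submits the command, and polls the
 * shared fence buffer until the PSP writes that index back.  Hypothetical
 * helper for illustration only; the real loop also bails out on RAS
 * interrupts and re-invalidates HDP between reads.
 */
static int psp_example_wait_fence(volatile uint32_t *fence_buf,
				  uint32_t expected_index, int timeout)
{
	while (*fence_buf != expected_index) {
		if (--timeout == 0)
			return -ETIME;	/* PSP never acknowledged the command */
		udelay(1);
	}

	return 0;
}
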
776 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
777 {
778 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
779 
780 	mutex_lock(&psp->mutex);
781 
782 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
783 
784 	return cmd;
785 }
786 
787 static void release_psp_cmd_buf(struct psp_context *psp)
788 {
789 	mutex_unlock(&psp->mutex);
790 }
791 
792 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
793 				 struct psp_gfx_cmd_resp *cmd,
794 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
795 {
796 	struct amdgpu_device *adev = psp->adev;
797 	uint32_t size = 0;
798 	uint64_t tmr_pa = 0;
799 
800 	if (tmr_bo) {
801 		size = amdgpu_bo_size(tmr_bo);
802 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
803 	}
804 
805 	if (amdgpu_sriov_vf(psp->adev))
806 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
807 	else
808 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
809 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
810 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
811 	cmd->cmd.cmd_setup_tmr.buf_size = size;
812 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
813 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
814 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
815 }
816 
817 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
818 				      uint64_t pri_buf_mc, uint32_t size)
819 {
820 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
821 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
822 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
823 	cmd->cmd.cmd_load_toc.toc_size = size;
824 }
825 
826 /* Issue LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
827 static int psp_load_toc(struct psp_context *psp,
828 			uint32_t *tmr_size)
829 {
830 	int ret;
831 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
832 
833 	/* Copy toc to psp firmware private buffer */
834 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
835 
836 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
837 
838 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
839 				 psp->fence_buf_mc_addr);
840 	if (!ret)
841 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
842 
843 	release_psp_cmd_buf(psp);
844 
845 	return ret;
846 }
847 
848 /* Set up Trusted Memory Region */
849 static int psp_tmr_init(struct psp_context *psp)
850 {
851 	int ret = 0;
852 	int tmr_size;
853 	void *tmr_buf;
854 	void **pptr;
855 
856 	/*
857 	 * According to the HW engineers, the TMR address should be "naturally
858 	 * aligned", i.e. the start address is an integer multiple of the TMR size.
859 	 *
860 	 * Note: this memory needs to stay reserved until the driver
861 	 * is unloaded.
862 	 */
863 	tmr_size = PSP_TMR_SIZE(psp->adev);
864 
865 	/* For ASICs that support RLC autoload, psp will parse the toc
866 	 * and calculate the total TMR size needed
867 	 */
868 	if (!amdgpu_sriov_vf(psp->adev) &&
869 	    psp->toc.start_addr &&
870 	    psp->toc.size_bytes &&
871 	    psp->fw_pri_buf) {
872 		ret = psp_load_toc(psp, &tmr_size);
873 		if (ret) {
874 			dev_err(psp->adev->dev, "Failed to load toc\n");
875 			return ret;
876 		}
877 	}
878 
879 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
880 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
881 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
882 					      PSP_TMR_ALIGNMENT,
883 					      AMDGPU_HAS_VRAM(psp->adev) ?
884 					      AMDGPU_GEM_DOMAIN_VRAM :
885 					      AMDGPU_GEM_DOMAIN_GTT,
886 					      &psp->tmr_bo, &psp->tmr_mc_addr,
887 					      pptr);
888 	}
889 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
890 		psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
891 
892 	return ret;
893 }
894 
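/*
 * "Naturally aligned" in the comment above means the TMR start address is an
 * integer multiple of the TMR size.  A minimal sketch, assuming a power-of-two
 * tmr_size (illustrative helper only, not used by the driver):
 */
static inline uint64_t psp_example_natural_align(uint64_t addr, uint64_t tmr_size)
{
	/* round down to the nearest multiple of tmr_size */
	return addr & ~(tmr_size - 1);
}
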
895 static bool psp_skip_tmr(struct psp_context *psp)
896 {
897 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
898 	case IP_VERSION(11, 0, 9):
899 	case IP_VERSION(11, 0, 7):
900 	case IP_VERSION(13, 0, 2):
901 	case IP_VERSION(13, 0, 6):
902 	case IP_VERSION(13, 0, 10):
903 	case IP_VERSION(13, 0, 12):
904 	case IP_VERSION(13, 0, 14):
905 		return true;
906 	default:
907 		return false;
908 	}
909 }
910 
911 static int psp_tmr_load(struct psp_context *psp)
912 {
913 	int ret;
914 	struct psp_gfx_cmd_resp *cmd;
915 
916 	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
917 	 * Already set up by host driver.
918 	 */
919 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
920 		return 0;
921 
922 	cmd = acquire_psp_cmd_buf(psp);
923 
924 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
925 	if (psp->tmr_bo)
926 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
927 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
928 
929 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
930 				 psp->fence_buf_mc_addr);
931 
932 	release_psp_cmd_buf(psp);
933 
934 	return ret;
935 }
936 
937 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
938 					struct psp_gfx_cmd_resp *cmd)
939 {
940 	if (amdgpu_sriov_vf(psp->adev))
941 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
942 	else
943 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
944 }
945 
946 static int psp_tmr_unload(struct psp_context *psp)
947 {
948 	int ret;
949 	struct psp_gfx_cmd_resp *cmd;
950 
951 	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
952 	 * as TMR is not loaded at all
953 	 */
954 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
955 		return 0;
956 
957 	cmd = acquire_psp_cmd_buf(psp);
958 
959 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
960 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
961 
962 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
963 				 psp->fence_buf_mc_addr);
964 
965 	release_psp_cmd_buf(psp);
966 
967 	return ret;
968 }
969 
970 static int psp_tmr_terminate(struct psp_context *psp)
971 {
972 	return psp_tmr_unload(psp);
973 }
974 
975 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
976 					uint64_t *output_ptr)
977 {
978 	int ret;
979 	struct psp_gfx_cmd_resp *cmd;
980 
981 	if (!output_ptr)
982 		return -EINVAL;
983 
984 	if (amdgpu_sriov_vf(psp->adev))
985 		return 0;
986 
987 	cmd = acquire_psp_cmd_buf(psp);
988 
989 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
990 
991 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
992 				 psp->fence_buf_mc_addr);
993 
994 	if (!ret) {
995 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
996 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
997 	}
998 
999 	release_psp_cmd_buf(psp);
1000 
1001 	return ret;
1002 }
1003 
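/*
 * The PSP returns 64-bit addresses split into two 32-bit response words, as
 * seen above and in psp_get_fw_reservation_info() below.  A minimal helper
 * sketch (hypothetical, for illustration) of the lo/hi recombination:
 */
static inline uint64_t psp_example_pack_addr(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}
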
1004 static int psp_get_fw_reservation_info(struct psp_context *psp,
1005 						   uint32_t cmd_id,
1006 						   uint64_t *addr,
1007 						   uint32_t *size)
1008 {
1009 	int ret;
1010 	uint32_t status;
1011 	struct psp_gfx_cmd_resp *cmd;
1012 
1013 	cmd = acquire_psp_cmd_buf(psp);
1014 
1015 	cmd->cmd_id = cmd_id;
1016 
1017 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1018 				 psp->fence_buf_mc_addr);
1019 	if (ret) {
1020 		release_psp_cmd_buf(psp);
1021 		return ret;
1022 	}
1023 
1024 	status = cmd->resp.status;
1025 	if (status == PSP_ERR_UNKNOWN_COMMAND) {
1026 		release_psp_cmd_buf(psp);
1027 		*addr = 0;
1028 		*size = 0;
1029 		return 0;
1030 	}
1031 
1032 	*addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
1033 		cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
1034 	*size = cmd->resp.uresp.fw_reserve_info.reserve_size;
1035 
1036 	release_psp_cmd_buf(psp);
1037 
1038 	return 0;
1039 }
1040 
1041 int psp_update_fw_reservation(struct psp_context *psp)
1042 {
1043 	int ret;
1044 	uint64_t reserv_addr, reserv_addr_ext;
1045 	uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
1046 	struct amdgpu_device *adev = psp->adev;
1047 
1048 	mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
1049 
1050 	if (amdgpu_sriov_vf(psp->adev))
1051 		return 0;
1052 
1053 	switch (mp0_ip_ver) {
1054 	case IP_VERSION(14, 0, 2):
1055 		if (adev->psp.sos.fw_version < 0x3b0e0d)
1056 			return 0;
1057 		break;
1058 
1059 	case IP_VERSION(14, 0, 3):
1060 		if (adev->psp.sos.fw_version < 0x3a0e14)
1061 			return 0;
1062 		break;
1063 
1064 	default:
1065 		return 0;
1066 	}
1067 
1068 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
1069 	if (ret)
1070 		return ret;
1071 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
1072 	if (ret)
1073 		return ret;
1074 
1075 	if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
1076 		dev_warn(adev->dev, "reserve fw region is not valid!\n");
1077 		return 0;
1078 	}
1079 
1080 	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1081 
1082 	reserv_size = roundup(reserv_size, SZ_1M);
1083 
1084 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
1085 	if (ret) {
1086 		dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
1087 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1088 		return ret;
1089 	}
1090 
1091 	reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
1092 
1093 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
1094 					 &adev->mman.fw_reserved_memory_extend, NULL);
1095 	if (ret) {
1096 		dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
1097 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
1098 		return ret;
1099 	}
1100 
1101 	return 0;
1102 }
1103 
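/*
 * Sketch of the sanity check done above: the PSP-reported reservation is only
 * honored when it sits at the very top of VRAM, and its size is rounded up to
 * a 1 MiB multiple before the BO is created.  Illustrative helper only:
 */
static inline bool psp_example_reserv_at_vram_top(uint64_t vram_size,
						  uint64_t reserv_addr,
						  uint32_t reserv_size)
{
	return reserv_addr == vram_size - reserv_size;
}
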
1104 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
1105 {
1106 	struct psp_context *psp = &adev->psp;
1107 	struct psp_gfx_cmd_resp *cmd;
1108 	int ret;
1109 
1110 	if (amdgpu_sriov_vf(adev))
1111 		return 0;
1112 
1113 	cmd = acquire_psp_cmd_buf(psp);
1114 
1115 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1116 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
1117 
1118 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1119 	if (!ret) {
1120 		*boot_cfg =
1121 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1122 	}
1123 
1124 	release_psp_cmd_buf(psp);
1125 
1126 	return ret;
1127 }
1128 
1129 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1130 {
1131 	int ret;
1132 	struct psp_context *psp = &adev->psp;
1133 	struct psp_gfx_cmd_resp *cmd;
1134 
1135 	if (amdgpu_sriov_vf(adev))
1136 		return 0;
1137 
1138 	cmd = acquire_psp_cmd_buf(psp);
1139 
1140 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1141 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1142 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1143 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1144 
1145 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1146 
1147 	release_psp_cmd_buf(psp);
1148 
1149 	return ret;
1150 }
1151 
1152 static int psp_rl_load(struct amdgpu_device *adev)
1153 {
1154 	int ret;
1155 	struct psp_context *psp = &adev->psp;
1156 	struct psp_gfx_cmd_resp *cmd;
1157 
1158 	if (!is_psp_fw_valid(psp->rl))
1159 		return 0;
1160 
1161 	cmd = acquire_psp_cmd_buf(psp);
1162 
1163 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1164 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1165 
1166 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1167 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1168 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1169 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1170 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1171 
1172 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1173 
1174 	release_psp_cmd_buf(psp);
1175 
1176 	return ret;
1177 }
1178 
1179 int psp_memory_partition(struct psp_context *psp, int mode)
1180 {
1181 	struct psp_gfx_cmd_resp *cmd;
1182 	int ret;
1183 
1184 	if (amdgpu_sriov_vf(psp->adev))
1185 		return 0;
1186 
1187 	cmd = acquire_psp_cmd_buf(psp);
1188 
1189 	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1190 	cmd->cmd.cmd_memory_part.mode = mode;
1191 
1192 	dev_info(psp->adev->dev,
1193 		 "Requesting %d memory partition change through PSP", mode);
1194 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1195 	if (ret)
1196 		dev_err(psp->adev->dev,
1197 			"PSP request failed to change to NPS%d mode\n", mode);
1198 
1199 	release_psp_cmd_buf(psp);
1200 
1201 	return ret;
1202 }
1203 
1204 int psp_spatial_partition(struct psp_context *psp, int mode)
1205 {
1206 	struct psp_gfx_cmd_resp *cmd;
1207 	int ret;
1208 
1209 	if (amdgpu_sriov_vf(psp->adev))
1210 		return 0;
1211 
1212 	cmd = acquire_psp_cmd_buf(psp);
1213 
1214 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1215 	cmd->cmd.cmd_spatial_part.mode = mode;
1216 
1217 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1218 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1219 
1220 	release_psp_cmd_buf(psp);
1221 
1222 	return ret;
1223 }
1224 
1225 static int psp_asd_initialize(struct psp_context *psp)
1226 {
1227 	int ret;
1228 
1229 	/* If the PSP version doesn't match the ASD version, ASD loading will fail;
1230 	 * add a workaround to bypass it for sriov for now.
1231 	 * TODO: add a version check to make this common
1232 	 */
1233 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1234 		return 0;
1235 
1236 	/* bypass asd if display hardware is not available */
1237 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1238 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1239 		return 0;
1240 
1241 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1242 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1243 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1244 
1245 	ret = psp_ta_load(psp, &psp->asd_context);
1246 	if (!ret)
1247 		psp->asd_context.initialized = true;
1248 
1249 	return ret;
1250 }
1251 
1252 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1253 				       uint32_t session_id)
1254 {
1255 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1256 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1257 }
1258 
1259 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1260 {
1261 	int ret;
1262 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1263 
1264 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1265 
1266 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1267 
1268 	context->resp_status = cmd->resp.status;
1269 
1270 	release_psp_cmd_buf(psp);
1271 
1272 	return ret;
1273 }
1274 
1275 static int psp_asd_terminate(struct psp_context *psp)
1276 {
1277 	int ret;
1278 
1279 	if (amdgpu_sriov_vf(psp->adev))
1280 		return 0;
1281 
1282 	if (!psp->asd_context.initialized)
1283 		return 0;
1284 
1285 	ret = psp_ta_unload(psp, &psp->asd_context);
1286 	if (!ret)
1287 		psp->asd_context.initialized = false;
1288 
1289 	return ret;
1290 }
1291 
1292 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1293 		uint32_t id, uint32_t value)
1294 {
1295 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1296 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1297 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1298 }
1299 
1300 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1301 		uint32_t value)
1302 {
1303 	struct psp_gfx_cmd_resp *cmd;
1304 	int ret = 0;
1305 
1306 	if (reg >= PSP_REG_LAST)
1307 		return -EINVAL;
1308 
1309 	cmd = acquire_psp_cmd_buf(psp);
1310 
1311 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1312 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1313 	if (ret)
1314 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1315 
1316 	release_psp_cmd_buf(psp);
1317 
1318 	return ret;
1319 }
1320 
1321 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1322 				     uint64_t ta_bin_mc,
1323 				     struct ta_context *context)
1324 {
1325 	cmd->cmd_id				= context->ta_load_type;
1326 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1327 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1328 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1329 
1330 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1331 		lower_32_bits(context->mem_context.shared_mc_addr);
1332 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1333 		upper_32_bits(context->mem_context.shared_mc_addr);
1334 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1335 }
1336 
1337 int psp_ta_init_shared_buf(struct psp_context *psp,
1338 				  struct ta_mem_context *mem_ctx)
1339 {
1340 	/*
1341 	 * Allocate 16k of memory, aligned to 4k, from the Frame Buffer (local
1342 	 * physical memory) for the TA-to-host shared buffer
1343 	 */
1344 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1345 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1346 				      AMDGPU_GEM_DOMAIN_GTT,
1347 				      &mem_ctx->shared_bo,
1348 				      &mem_ctx->shared_mc_addr,
1349 				      &mem_ctx->shared_buf);
1350 }
1351 
1352 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1353 				       uint32_t ta_cmd_id,
1354 				       uint32_t session_id)
1355 {
1356 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1357 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1358 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1359 }
1360 
1361 int psp_ta_invoke(struct psp_context *psp,
1362 		  uint32_t ta_cmd_id,
1363 		  struct ta_context *context)
1364 {
1365 	int ret;
1366 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1367 
1368 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1369 
1370 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1371 				 psp->fence_buf_mc_addr);
1372 
1373 	context->resp_status = cmd->resp.status;
1374 
1375 	release_psp_cmd_buf(psp);
1376 
1377 	return ret;
1378 }
1379 
1380 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1381 {
1382 	int ret;
1383 	struct psp_gfx_cmd_resp *cmd;
1384 
1385 	cmd = acquire_psp_cmd_buf(psp);
1386 
1387 	psp_copy_fw(psp, context->bin_desc.start_addr,
1388 		    context->bin_desc.size_bytes);
1389 
1390 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
1391 		context->mem_context.shared_bo)
1392 		context->mem_context.shared_mc_addr =
1393 			amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
1394 
1395 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1396 
1397 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1398 				 psp->fence_buf_mc_addr);
1399 
1400 	context->resp_status = cmd->resp.status;
1401 
1402 	if (!ret)
1403 		context->session_id = cmd->resp.session_id;
1404 
1405 	release_psp_cmd_buf(psp);
1406 
1407 	return ret;
1408 }
1409 
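/*
 * Sketch of a full TA session round trip using the helpers above
 * (hypothetical wrapper, not part of the driver flow): load the TA binary to
 * obtain a session id, invoke a command against that session, then unload it.
 */
static int psp_example_ta_roundtrip(struct psp_context *psp,
				    struct ta_context *context,
				    uint32_t ta_cmd_id)
{
	int ret;

	ret = psp_ta_load(psp, context);	/* fills context->session_id */
	if (ret)
		return ret;

	ret = psp_ta_invoke(psp, ta_cmd_id, context);

	/* unload regardless of the invoke result to release the session */
	psp_ta_unload(psp, context);

	return ret;
}
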
1410 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1411 {
1412 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1413 }
1414 
1415 int psp_xgmi_terminate(struct psp_context *psp)
1416 {
1417 	int ret;
1418 	struct amdgpu_device *adev = psp->adev;
1419 
1420 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1421 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1422 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1423 	     adev->gmc.xgmi.connected_to_cpu))
1424 		return 0;
1425 
1426 	if (!psp->xgmi_context.context.initialized)
1427 		return 0;
1428 
1429 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1430 
1431 	psp->xgmi_context.context.initialized = false;
1432 
1433 	return ret;
1434 }
1435 
1436 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1437 {
1438 	struct ta_xgmi_shared_memory *xgmi_cmd;
1439 	int ret;
1440 
1441 	if (!psp->ta_fw ||
1442 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1443 	    !psp->xgmi_context.context.bin_desc.start_addr)
1444 		return -ENOENT;
1445 
1446 	if (!load_ta)
1447 		goto invoke;
1448 
1449 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1450 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1451 
1452 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1453 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1454 		if (ret)
1455 			return ret;
1456 	}
1457 
1458 	/* Load XGMI TA */
1459 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1460 	if (!ret)
1461 		psp->xgmi_context.context.initialized = true;
1462 	else
1463 		return ret;
1464 
1465 invoke:
1466 	/* Initialize XGMI session */
1467 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1468 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1469 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1470 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1471 
1472 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1473 	/* note down the capability flag for XGMI TA */
1474 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1475 
1476 	return ret;
1477 }
1478 
1479 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1480 {
1481 	struct ta_xgmi_shared_memory *xgmi_cmd;
1482 	int ret;
1483 
1484 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1485 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1486 
1487 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1488 
1489 	/* Invoke xgmi ta to get hive id */
1490 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1491 	if (ret)
1492 		return ret;
1493 
1494 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1495 
1496 	return 0;
1497 }
1498 
1499 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1500 {
1501 	struct ta_xgmi_shared_memory *xgmi_cmd;
1502 	int ret;
1503 
1504 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1505 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1506 
1507 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1508 
1509 	/* Invoke xgmi ta to get the node id */
1510 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1511 	if (ret)
1512 		return ret;
1513 
1514 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1515 
1516 	return 0;
1517 }
1518 
1519 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1520 {
1521 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1522 			IP_VERSION(13, 0, 2) &&
1523 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1524 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1525 		       IP_VERSION(13, 0, 6);
1526 }
1527 
1528 /*
1529  * Chips that support extended topology information require the driver to
1530  * reflect topology information in the opposite direction.  This is
1531  * because the TA has already exceeded its link record limit and if the
1532  * TA holds bi-directional information, the driver would have to do
1533  * multiple fetches instead of just two.
1534  */
1535 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1536 					struct psp_xgmi_node_info node_info)
1537 {
1538 	struct amdgpu_device *mirror_adev;
1539 	struct amdgpu_hive_info *hive;
1540 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1541 	uint64_t dst_node_id = node_info.node_id;
1542 	uint8_t dst_num_hops = node_info.num_hops;
1543 	uint8_t dst_num_links = node_info.num_links;
1544 
1545 	hive = amdgpu_get_xgmi_hive(psp->adev);
1546 	if (WARN_ON(!hive))
1547 		return;
1548 
1549 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1550 		struct psp_xgmi_topology_info *mirror_top_info;
1551 		int j;
1552 
1553 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1554 			continue;
1555 
1556 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1557 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1558 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1559 				continue;
1560 
1561 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1562 			/*
1563 			 * prevent re-reflection of a 0 num_links value, since the
1564 			 * reflection criteria are based on num_hops (direct or
1565 			 * indirect).
1566 			 */
1567 			if (dst_num_links)
1568 				mirror_top_info->nodes[j].num_links = dst_num_links;
1569 
1570 			break;
1571 		}
1572 
1573 		break;
1574 	}
1575 
1576 	amdgpu_put_xgmi_hive(hive);
1577 }
1578 
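/*
 * Minimal sketch of the reflection idea above on a toy peer-link record
 * (hypothetical types; the real code walks the hive's device list and its
 * per-node topology tables):
 */
struct psp_example_link {
	uint64_t peer_node_id;
	uint8_t num_hops;
	uint8_t num_links;
};

static void psp_example_reflect(struct psp_example_link *mirror,
				const struct psp_example_link *src)
{
	mirror->num_hops = src->num_hops;
	/* never re-reflect a zero link count */
	if (src->num_links)
		mirror->num_links = src->num_links;
}
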
1579 int psp_xgmi_get_topology_info(struct psp_context *psp,
1580 			       int number_devices,
1581 			       struct psp_xgmi_topology_info *topology,
1582 			       bool get_extended_data)
1583 {
1584 	struct ta_xgmi_shared_memory *xgmi_cmd;
1585 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1586 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1587 	int i;
1588 	int ret;
1589 
1590 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1591 		return -EINVAL;
1592 
1593 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1594 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1595 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1596 
1597 	/* Fill in the shared memory with topology information as input */
1598 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1599 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1600 	topology_info_input->num_nodes = number_devices;
1601 
1602 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1603 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1604 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1605 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1606 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1607 	}
1608 
1609 	/* Invoke xgmi ta to get the topology information */
1610 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1611 	if (ret)
1612 		return ret;
1613 
1614 	/* Read the output topology information from the shared memory */
1615 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1616 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1617 	for (i = 0; i < topology->num_nodes; i++) {
1618 		/* extended data will either be 0 or equal to non-extended data */
1619 		if (topology_info_output->nodes[i].num_hops)
1620 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1621 
1622 		/* non-extended data gets everything here so no need to update */
1623 		if (!get_extended_data) {
1624 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1625 			topology->nodes[i].is_sharing_enabled =
1626 					topology_info_output->nodes[i].is_sharing_enabled;
1627 			topology->nodes[i].sdma_engine =
1628 					topology_info_output->nodes[i].sdma_engine;
1629 		}
1630 
1631 	}
1632 
1633 	/* Invoke xgmi ta again to get the link information */
1634 	if (psp_xgmi_peer_link_info_supported(psp)) {
1635 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1636 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1637 		bool requires_reflection =
1638 			(psp->xgmi_context.supports_extended_data &&
1639 			 get_extended_data) ||
1640 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1641 				IP_VERSION(13, 0, 6) ||
1642 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1643 				IP_VERSION(13, 0, 14);
1644 		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1645 				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1646 
1647 		/* populate the shared output buffer rather than the cmd input buffer
1648 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1649 		 * This is required by the xgmi ta implementation of GET_PEER_LINKS,
1650 		 * and the same requirement applies to the GET_EXTEND_PEER_LINKS command.
1651 		 */
1652 		if (ta_port_num_support) {
1653 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1654 
1655 			for (i = 0; i < topology->num_nodes; i++)
1656 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1657 
1658 			link_extend_info_output->num_nodes = topology->num_nodes;
1659 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1660 		} else {
1661 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1662 
1663 			for (i = 0; i < topology->num_nodes; i++)
1664 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1665 
1666 			link_info_output->num_nodes = topology->num_nodes;
1667 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1668 		}
1669 
1670 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1671 		if (ret)
1672 			return ret;
1673 
1674 		for (i = 0; i < topology->num_nodes; i++) {
1675 			uint8_t node_num_links = ta_port_num_support ?
1676 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1677 			/* accumulate num_links on extended data */
1678 			if (get_extended_data) {
1679 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1680 			} else {
1681 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1682 								topology->nodes[i].num_links : node_num_links;
1683 			}
1684 			/* populate the connected port num info if supported and available */
1685 			if (ta_port_num_support && topology->nodes[i].num_links) {
1686 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1687 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1688 			}
1689 
1690 			/* reflect the topology information for bi-directionality */
1691 			if (requires_reflection && topology->nodes[i].num_hops)
1692 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1693 		}
1694 	}
1695 
1696 	return 0;
1697 }
1698 
1699 int psp_xgmi_set_topology_info(struct psp_context *psp,
1700 			       int number_devices,
1701 			       struct psp_xgmi_topology_info *topology)
1702 {
1703 	struct ta_xgmi_shared_memory *xgmi_cmd;
1704 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1705 	int i;
1706 
1707 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1708 		return -EINVAL;
1709 
1710 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1711 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1712 
1713 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1714 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1715 	topology_info_input->num_nodes = number_devices;
1716 
1717 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1718 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1719 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1720 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1721 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1722 	}
1723 
1724 	/* Invoke xgmi ta to set topology information */
1725 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1726 }
1727 
1728 // ras begin
1729 static void psp_ras_ta_check_status(struct psp_context *psp)
1730 {
1731 	struct ta_ras_shared_memory *ras_cmd =
1732 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1733 
1734 	switch (ras_cmd->ras_status) {
1735 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1736 		dev_warn(psp->adev->dev,
1737 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1738 		break;
1739 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1740 		dev_warn(psp->adev->dev,
1741 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1742 		break;
1743 	case TA_RAS_STATUS__SUCCESS:
1744 		break;
1745 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1746 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1747 			dev_warn(psp->adev->dev,
1748 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1749 		break;
1750 	default:
1751 		dev_warn(psp->adev->dev,
1752 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1753 		break;
1754 	}
1755 }
1756 
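/*
 * Copy a RAS TA command into the shared buffer, invoke the TA and, for
 * TRIGGER_ERROR and QUERY_ADDRESS, copy the result back to the caller.
 * The shared buffer is protected by ras_context.mutex for the whole
 * round trip.
 */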
1757 static int psp_ras_send_cmd(struct psp_context *psp,
1758 		enum ras_command cmd_id, void *in, void *out)
1759 {
1760 	struct ta_ras_shared_memory *ras_cmd;
1761 	uint32_t cmd = cmd_id;
1762 	int ret = 0;
1763 
1764 	if (!in)
1765 		return -EINVAL;
1766 
1767 	mutex_lock(&psp->ras_context.mutex);
1768 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1769 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1770 
1771 	switch (cmd) {
1772 	case TA_RAS_COMMAND__ENABLE_FEATURES:
1773 	case TA_RAS_COMMAND__DISABLE_FEATURES:
1774 		memcpy(&ras_cmd->ras_in_message,
1775 			in, sizeof(ras_cmd->ras_in_message));
1776 		break;
1777 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1778 		memcpy(&ras_cmd->ras_in_message.trigger_error,
1779 			in, sizeof(ras_cmd->ras_in_message.trigger_error));
1780 		break;
1781 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1782 		memcpy(&ras_cmd->ras_in_message.address,
1783 			in, sizeof(ras_cmd->ras_in_message.address));
1784 		break;
1785 	default:
1786 		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1787 		ret = -EINVAL;
1788 		goto err_out;
1789 	}
1790 
1791 	ras_cmd->cmd_id = cmd;
1792 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1793 
1794 	switch (cmd) {
1795 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1796 		if (!ret && out)
1797 			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1798 		break;
1799 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1800 		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1801 			ret = -EINVAL;
1802 		else if (out)
1803 			memcpy(out,
1804 				&ras_cmd->ras_out_message.address,
1805 				sizeof(ras_cmd->ras_out_message.address));
1806 		break;
1807 	default:
1808 		break;
1809 	}
1810 
1811 err_out:
1812 	mutex_unlock(&psp->ras_context.mutex);
1813 
1814 	return ret;
1815 }
1816 
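/*
 * Invoke a command on the loaded RAS TA and sanity-check the interface
 * version and the output flags reported back by the TA.
 */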
1817 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1818 {
1819 	struct ta_ras_shared_memory *ras_cmd;
1820 	int ret;
1821 
1822 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1823 
1824 	/*
1825 	 * TODO: bypass the loading in sriov for now
1826 	 */
1827 	if (amdgpu_sriov_vf(psp->adev))
1828 		return 0;
1829 
1830 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1831 
1832 	if (amdgpu_ras_intr_triggered())
1833 		return ret;
1834 
1835 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1836 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1837 		return -EINVAL;
1838 	}
1839 
1840 	if (!ret) {
1841 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1842 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1843 
1844 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1845 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1846 			dev_warn(psp->adev->dev,
1847 				 "RAS internal register access blocked\n");
1848 
1849 		psp_ras_ta_check_status(psp);
1850 	}
1851 
1852 	return ret;
1853 }
1854 
1855 int psp_ras_enable_features(struct psp_context *psp,
1856 		union ta_ras_cmd_input *info, bool enable)
1857 {
1858 	enum ras_command cmd_id;
1859 	int ret;
1860 
1861 	if (!psp->ras_context.context.initialized || !info)
1862 		return -EINVAL;
1863 
1864 	cmd_id = enable ?
1865 		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1866 	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1867 	if (ret)
1868 		return -EINVAL;
1869 
1870 	return 0;
1871 }
1872 
1873 int psp_ras_terminate(struct psp_context *psp)
1874 {
1875 	int ret;
1876 
1877 	/*
1878 	 * TODO: bypass the terminate in sriov for now
1879 	 */
1880 	if (amdgpu_sriov_vf(psp->adev))
1881 		return 0;
1882 
1883 	if (!psp->ras_context.context.initialized)
1884 		return 0;
1885 
1886 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1887 
1888 	psp->ras_context.context.initialized = false;
1889 
1890 	mutex_destroy(&psp->ras_context.mutex);
1891 
1892 	return ret;
1893 }
1894 
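/*
 * Load the optional RAS TA: reconcile the GECC setting with the boot
 * config when dynamic boot config is supported, set up the shared
 * buffer and the TA init flags (poison mode, dGPU mode, XCC/UMC masks,
 * NPS mode), then load the TA and mark the context initialized on
 * success.
 */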
1895 int psp_ras_initialize(struct psp_context *psp)
1896 {
1897 	int ret;
1898 	uint32_t boot_cfg = 0xFF;
1899 	struct amdgpu_device *adev = psp->adev;
1900 	struct ta_ras_shared_memory *ras_cmd;
1901 
1902 	/*
1903 	 * TODO: bypass the initialize in sriov for now
1904 	 */
1905 	if (amdgpu_sriov_vf(adev))
1906 		return 0;
1907 
1908 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1909 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1910 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1911 		return 0;
1912 	}
1913 
1914 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1915 		/* Query the GECC enablement status from the boot config:
1916 		 * boot_cfg == 1 means GECC is enabled, 0 means GECC is disabled
1917 		 */
1918 		ret = psp_boot_config_get(adev, &boot_cfg);
1919 		if (ret)
1920 			dev_warn(adev->dev, "PSP get boot config failed\n");
1921 
1922 		if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
1923 		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1924 			dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
1925 			dev_warn(adev->dev,
1926 				"To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
1927 		} else {
1928 			if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
1929 				amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1930 				if (boot_cfg == 1) {
1931 					dev_info(adev->dev, "GECC is enabled\n");
1932 				} else {
1933 					/* Enable GECC in the next boot cycle if it is
1934 					 * disabled in the boot config, or force-enable GECC
1935 					 * if the boot configuration could not be read
1936 					 */
1937 					ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1938 					if (ret)
1939 						dev_warn(adev->dev, "PSP set boot config failed\n");
1940 					else
1941 						dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1942 				}
1943 			} else {
1944 				if (!boot_cfg) {
1945 					if (!adev->ras_default_ecc_enabled &&
1946 					    amdgpu_ras_enable != 1 &&
1947 					    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1948 						dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
1949 					else
1950 						dev_info(adev->dev, "GECC is disabled\n");
1951 				} else {
1952 					/* Disable GECC in the next boot cycle if RAS is
1953 					 * disabled by the module parameters amdgpu_ras_enable
1954 					 * and/or amdgpu_ras_mask, or if the boot_config_get
1955 					 * call failed
1956 					 */
1957 					ret = psp_boot_config_set(adev, 0);
1958 					if (ret)
1959 						dev_warn(adev->dev, "PSP set boot config failed\n");
1960 					else
1961 						dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1962 				}
1963 			}
1964 		}
1965 	}
1966 
1967 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1968 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1969 
1970 	if (!psp->ras_context.context.mem_context.shared_buf) {
1971 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1972 		if (ret)
1973 			return ret;
1974 	}
1975 
1976 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1977 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1978 
1979 	if (amdgpu_ras_is_poison_mode_supported(adev))
1980 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1981 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1982 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1983 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1984 		adev->gfx.xcc_mask;
1985 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1986 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1987 		ras_cmd->ras_in_message.init_flags.nps_mode =
1988 			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1989 	ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
1990 
1991 	ret = psp_ta_load(psp, &psp->ras_context.context);
1992 
1993 	if (!ret && !ras_cmd->ras_status) {
1994 		psp->ras_context.context.initialized = true;
1995 		mutex_init(&psp->ras_context.mutex);
1996 	} else {
1997 		if (ras_cmd->ras_status)
1998 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1999 
2000 		/* failed to load the RAS TA */
2001 		psp->ras_context.context.initialized = false;
2002 	}
2003 
2004 	return ret;
2005 }
2006 
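/*
 * Inject a RAS error through the TA. The per-IP instance mask is packed
 * into sub_block_index for backward compatibility before TRIGGER_ERROR
 * is sent.
 */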
2007 int psp_ras_trigger_error(struct psp_context *psp,
2008 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
2009 {
2010 	struct amdgpu_device *adev = psp->adev;
2011 	int ret;
2012 	uint32_t dev_mask;
2013 	uint32_t ras_status = 0;
2014 
2015 	if (!psp->ras_context.context.initialized || !info)
2016 		return -EINVAL;
2017 
2018 	switch (info->block_id) {
2019 	case TA_RAS_BLOCK__GFX:
2020 		dev_mask = GET_MASK(GC, instance_mask);
2021 		break;
2022 	case TA_RAS_BLOCK__SDMA:
2023 		dev_mask = GET_MASK(SDMA0, instance_mask);
2024 		break;
2025 	case TA_RAS_BLOCK__VCN:
2026 	case TA_RAS_BLOCK__JPEG:
2027 		dev_mask = GET_MASK(VCN, instance_mask);
2028 		break;
2029 	default:
2030 		dev_mask = instance_mask;
2031 		break;
2032 	}
2033 
2034 	/* reuse sub_block_index for backward compatibility */
2035 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
2036 	dev_mask &= AMDGPU_RAS_INST_MASK;
2037 	info->sub_block_index |= dev_mask;
2038 
2039 	ret = psp_ras_send_cmd(psp,
2040 			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
2041 	if (ret)
2042 		return -EINVAL;
2043 
2044 	/* If err_event_athub occurs, the error injection was successful;
2045 	 * however, the return status from the TA is no longer reliable.
2046 	 */
2047 	if (amdgpu_ras_intr_triggered())
2048 		return 0;
2049 
2050 	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
2051 		return -EACCES;
2052 	else if (ras_status)
2053 		return -EINVAL;
2054 
2055 	return 0;
2056 }
2057 
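/* Translate an error address through the RAS TA QUERY_ADDRESS command. */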
2058 int psp_ras_query_address(struct psp_context *psp,
2059 			  struct ta_ras_query_address_input *addr_in,
2060 			  struct ta_ras_query_address_output *addr_out)
2061 {
2062 	int ret;
2063 
2064 	if (!psp->ras_context.context.initialized ||
2065 		!addr_in || !addr_out)
2066 		return -EINVAL;
2067 
2068 	ret = psp_ras_send_cmd(psp,
2069 			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
2070 
2071 	return ret;
2072 }
2073 // ras end
2074 
2075 // HDCP start
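/*
 * Load the optional HDCP TA. Skipped under SR-IOV, when display
 * hardware is harvested, or when the TA binary is not available.
 */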
2076 static int psp_hdcp_initialize(struct psp_context *psp)
2077 {
2078 	int ret;
2079 
2080 	/*
2081 	 * TODO: bypass the initialize in sriov for now
2082 	 */
2083 	if (amdgpu_sriov_vf(psp->adev))
2084 		return 0;
2085 
2086 	/* bypass hdcp initialization if dmu is harvested */
2087 	if (!amdgpu_device_has_display_hardware(psp->adev))
2088 		return 0;
2089 
2090 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
2091 	    !psp->hdcp_context.context.bin_desc.start_addr) {
2092 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
2093 		return 0;
2094 	}
2095 
2096 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
2097 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2098 
2099 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
2100 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
2101 		if (ret)
2102 			return ret;
2103 	}
2104 
2105 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
2106 	if (!ret) {
2107 		psp->hdcp_context.context.initialized = true;
2108 		mutex_init(&psp->hdcp_context.mutex);
2109 	}
2110 
2111 	return ret;
2112 }
2113 
2114 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2115 {
2116 	/*
2117 	 * TODO: bypass the loading in sriov for now
2118 	 */
2119 	if (amdgpu_sriov_vf(psp->adev))
2120 		return 0;
2121 
2122 	if (!psp->hdcp_context.context.initialized)
2123 		return 0;
2124 
2125 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
2126 }
2127 
2128 static int psp_hdcp_terminate(struct psp_context *psp)
2129 {
2130 	int ret;
2131 
2132 	/*
2133 	 * TODO: bypass the terminate in sriov for now
2134 	 */
2135 	if (amdgpu_sriov_vf(psp->adev))
2136 		return 0;
2137 
2138 	if (!psp->hdcp_context.context.initialized)
2139 		return 0;
2140 
2141 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2142 
2143 	psp->hdcp_context.context.initialized = false;
2144 
2145 	return ret;
2146 }
2147 // HDCP end
2148 
2149 // DTM start
2150 static int psp_dtm_initialize(struct psp_context *psp)
2151 {
2152 	int ret;
2153 
2154 	/*
2155 	 * TODO: bypass the initialize in sriov for now
2156 	 */
2157 	if (amdgpu_sriov_vf(psp->adev))
2158 		return 0;
2159 
2160 	/* bypass dtm initialization if dmu is harvested */
2161 	if (!amdgpu_device_has_display_hardware(psp->adev))
2162 		return 0;
2163 
2164 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
2165 	    !psp->dtm_context.context.bin_desc.start_addr) {
2166 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2167 		return 0;
2168 	}
2169 
2170 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2171 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2172 
2173 	if (!psp->dtm_context.context.mem_context.shared_buf) {
2174 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2175 		if (ret)
2176 			return ret;
2177 	}
2178 
2179 	ret = psp_ta_load(psp, &psp->dtm_context.context);
2180 	if (!ret) {
2181 		psp->dtm_context.context.initialized = true;
2182 		mutex_init(&psp->dtm_context.mutex);
2183 	}
2184 
2185 	return ret;
2186 }
2187 
2188 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2189 {
2190 	/*
2191 	 * TODO: bypass the loading in sriov for now
2192 	 */
2193 	if (amdgpu_sriov_vf(psp->adev))
2194 		return 0;
2195 
2196 	if (!psp->dtm_context.context.initialized)
2197 		return 0;
2198 
2199 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2200 }
2201 
2202 static int psp_dtm_terminate(struct psp_context *psp)
2203 {
2204 	int ret;
2205 
2206 	/*
2207 	 * TODO: bypass the terminate in sriov for now
2208 	 */
2209 	if (amdgpu_sriov_vf(psp->adev))
2210 		return 0;
2211 
2212 	if (!psp->dtm_context.context.initialized)
2213 		return 0;
2214 
2215 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
2216 
2217 	psp->dtm_context.context.initialized = false;
2218 
2219 	return ret;
2220 }
2221 // DTM end
2222 
2223 // RAP start
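/*
 * Load the optional RAP TA and run its INITIALIZE command. On failure
 * the TA is terminated and its shared buffer is freed again.
 */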
2224 static int psp_rap_initialize(struct psp_context *psp)
2225 {
2226 	int ret;
2227 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2228 
2229 	/*
2230 	 * TODO: bypass the initialize in sriov for now
2231 	 */
2232 	if (amdgpu_sriov_vf(psp->adev))
2233 		return 0;
2234 
2235 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2236 	    !psp->rap_context.context.bin_desc.start_addr) {
2237 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2238 		return 0;
2239 	}
2240 
2241 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2242 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2243 
2244 	if (!psp->rap_context.context.mem_context.shared_buf) {
2245 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2246 		if (ret)
2247 			return ret;
2248 	}
2249 
2250 	ret = psp_ta_load(psp, &psp->rap_context.context);
2251 	if (!ret) {
2252 		psp->rap_context.context.initialized = true;
2253 		mutex_init(&psp->rap_context.mutex);
2254 	} else
2255 		return ret;
2256 
2257 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2258 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2259 		psp_rap_terminate(psp);
2260 		/* free rap shared memory */
2261 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2262 
2263 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2264 			 ret, status);
2265 
2266 		return ret;
2267 	}
2268 
2269 	return 0;
2270 }
2271 
2272 static int psp_rap_terminate(struct psp_context *psp)
2273 {
2274 	int ret;
2275 
2276 	if (!psp->rap_context.context.initialized)
2277 		return 0;
2278 
2279 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2280 
2281 	psp->rap_context.context.initialized = false;
2282 
2283 	return ret;
2284 }
2285 
2286 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2287 {
2288 	struct ta_rap_shared_memory *rap_cmd;
2289 	int ret = 0;
2290 
2291 	if (!psp->rap_context.context.initialized)
2292 		return 0;
2293 
2294 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2295 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2296 		return -EINVAL;
2297 
2298 	mutex_lock(&psp->rap_context.mutex);
2299 
2300 	rap_cmd = (struct ta_rap_shared_memory *)
2301 		  psp->rap_context.context.mem_context.shared_buf;
2302 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2303 
2304 	rap_cmd->cmd_id = ta_cmd_id;
2305 	rap_cmd->validation_method_id = METHOD_A;
2306 
2307 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2308 	if (ret)
2309 		goto out_unlock;
2310 
2311 	if (status)
2312 		*status = rap_cmd->rap_status;
2313 
2314 out_unlock:
2315 	mutex_unlock(&psp->rap_context.mutex);
2316 
2317 	return ret;
2318 }
2319 // RAP end
2320 
2321 /* securedisplay start */
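/*
 * Load the optional securedisplay TA and issue QUERY_TA once to verify
 * it works; if the query reports a failure, the TA is marked
 * unavailable for later loads.
 */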
2322 static int psp_securedisplay_initialize(struct psp_context *psp)
2323 {
2324 	int ret;
2325 	struct ta_securedisplay_cmd *securedisplay_cmd;
2326 
2327 	/*
2328 	 * TODO: bypass the initialize in sriov for now
2329 	 */
2330 	if (amdgpu_sriov_vf(psp->adev))
2331 		return 0;
2332 
2333 	/* bypass securedisplay initialization if dmu is harvested */
2334 	if (!amdgpu_device_has_display_hardware(psp->adev))
2335 		return 0;
2336 
2337 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2338 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2339 		dev_info(psp->adev->dev,
2340 			 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
2341 		return 0;
2342 	}
2343 
2344 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2345 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2346 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2347 
2348 	if (!psp->securedisplay_context.context.initialized) {
2349 		ret = psp_ta_init_shared_buf(psp,
2350 					     &psp->securedisplay_context.context.mem_context);
2351 		if (ret)
2352 			return ret;
2353 	}
2354 
2355 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2356 	if (!ret) {
2357 		psp->securedisplay_context.context.initialized = true;
2358 		mutex_init(&psp->securedisplay_context.mutex);
2359 	} else
2360 		return ret;
2361 
2362 	mutex_lock(&psp->securedisplay_context.mutex);
2363 
2364 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2365 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2366 
2367 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2368 
2369 	mutex_unlock(&psp->securedisplay_context.mutex);
2370 
2371 	if (ret) {
2372 		psp_securedisplay_terminate(psp);
2373 		/* free securedisplay shared memory */
2374 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2375 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2376 		return -EINVAL;
2377 	}
2378 
2379 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2380 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2381 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2382 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2383 		/* don't try again */
2384 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2385 	}
2386 
2387 	return 0;
2388 }
2389 
2390 static int psp_securedisplay_terminate(struct psp_context *psp)
2391 {
2392 	int ret;
2393 
2394 	/*
2395 	 * TODO: bypass the terminate in sriov for now
2396 	 */
2397 	if (amdgpu_sriov_vf(psp->adev))
2398 		return 0;
2399 
2400 	if (!psp->securedisplay_context.context.initialized)
2401 		return 0;
2402 
2403 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2404 
2405 	psp->securedisplay_context.context.initialized = false;
2406 
2407 	return ret;
2408 }
2409 
2410 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2411 {
2412 	int ret;
2413 
2414 	if (!psp->securedisplay_context.context.initialized)
2415 		return -EINVAL;
2416 
2417 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2418 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2419 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2420 		return -EINVAL;
2421 
2422 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2423 
2424 	return ret;
2425 }
2426 /* SECUREDISPLAY end */
2427 
2428 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2429 {
2430 	struct psp_context *psp = &adev->psp;
2431 	int ret = 0;
2432 
2433 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2434 		ret = psp->funcs->wait_for_bootloader(psp);
2435 
2436 	return ret;
2437 }
2438 
2439 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2440 {
2441 	if (psp->funcs &&
2442 	    psp->funcs->get_ras_capability) {
2443 		return psp->funcs->get_ras_capability(psp);
2444 	} else {
2445 		return false;
2446 	}
2447 }
2448 
2449 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2450 {
2451 	struct psp_context *psp = &adev->psp;
2452 
2453 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2454 		return false;
2455 
2456 	if (psp->funcs && psp->funcs->is_reload_needed)
2457 		return psp->funcs->is_reload_needed(psp);
2458 
2459 	return false;
2460 }
2461 
2462 static void psp_update_gpu_addresses(struct amdgpu_device *adev)
2463 {
2464 	struct psp_context *psp = &adev->psp;
2465 
2466 	if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
2467 		psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
2468 		psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
2469 		psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
2470 	}
2471 	if (adev->firmware.rbuf && psp->km_ring.ring_mem)
2472 		psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
2473 }
2474 
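/*
 * Bring up the PSP: run the bootloader stages that have valid firmware
 * (KDB, SPL, SYS_DRV, SOC/INTF/DBG/RAS/IPKEYMGR/SPDM drivers, SOS),
 * create the KM ring, update the firmware reservation and set up the
 * TMR, loading PMFW before the TMR load on ASICs with centralized DF
 * Cstate management.
 */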
2475 static int psp_hw_start(struct psp_context *psp)
2476 {
2477 	struct amdgpu_device *adev = psp->adev;
2478 	int ret;
2479 
2480 	if (amdgpu_virt_xgmi_migrate_enabled(adev))
2481 		psp_update_gpu_addresses(adev);
2482 
2483 	if (!amdgpu_sriov_vf(adev)) {
2484 		if ((is_psp_fw_valid(psp->kdb)) &&
2485 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2486 			ret = psp_bootloader_load_kdb(psp);
2487 			if (ret) {
2488 				dev_err(adev->dev, "PSP load kdb failed!\n");
2489 				return ret;
2490 			}
2491 		}
2492 
2493 		if ((is_psp_fw_valid(psp->spl)) &&
2494 		    (psp->funcs->bootloader_load_spl != NULL)) {
2495 			ret = psp_bootloader_load_spl(psp);
2496 			if (ret) {
2497 				dev_err(adev->dev, "PSP load spl failed!\n");
2498 				return ret;
2499 			}
2500 		}
2501 
2502 		if ((is_psp_fw_valid(psp->sys)) &&
2503 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2504 			ret = psp_bootloader_load_sysdrv(psp);
2505 			if (ret) {
2506 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2507 				return ret;
2508 			}
2509 		}
2510 
2511 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2512 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2513 			ret = psp_bootloader_load_soc_drv(psp);
2514 			if (ret) {
2515 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2516 				return ret;
2517 			}
2518 		}
2519 
2520 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2521 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2522 			ret = psp_bootloader_load_intf_drv(psp);
2523 			if (ret) {
2524 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2525 				return ret;
2526 			}
2527 		}
2528 
2529 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2530 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2531 			ret = psp_bootloader_load_dbg_drv(psp);
2532 			if (ret) {
2533 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2534 				return ret;
2535 			}
2536 		}
2537 
2538 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2539 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2540 			ret = psp_bootloader_load_ras_drv(psp);
2541 			if (ret) {
2542 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2543 				return ret;
2544 			}
2545 		}
2546 
2547 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2548 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2549 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2550 			if (ret) {
2551 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2552 				return ret;
2553 			}
2554 		}
2555 
2556 		if ((is_psp_fw_valid(psp->spdm_drv)) &&
2557 		    (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2558 			ret = psp_bootloader_load_spdm_drv(psp);
2559 			if (ret) {
2560 				dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2561 				return ret;
2562 			}
2563 		}
2564 
2565 		if ((is_psp_fw_valid(psp->sos)) &&
2566 		    (psp->funcs->bootloader_load_sos != NULL)) {
2567 			ret = psp_bootloader_load_sos(psp);
2568 			if (ret) {
2569 				dev_err(adev->dev, "PSP load sos failed!\n");
2570 				return ret;
2571 			}
2572 		}
2573 	}
2574 
2575 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2576 	if (ret) {
2577 		dev_err(adev->dev, "PSP create ring failed!\n");
2578 		return ret;
2579 	}
2580 
2581 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2582 		ret = psp_update_fw_reservation(psp);
2583 		if (ret) {
2584 			dev_err(adev->dev, "update fw reservation failed!\n");
2585 			return ret;
2586 		}
2587 	}
2588 
2589 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2590 		goto skip_pin_bo;
2591 
2592 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2593 		ret = psp_tmr_init(psp);
2594 		if (ret) {
2595 			dev_err(adev->dev, "PSP tmr init failed!\n");
2596 			return ret;
2597 		}
2598 	}
2599 
2600 skip_pin_bo:
2601 	/*
2602 	 * For ASICs with DF Cstate management centralized
2603 	 * to PMFW, TMR setup should be performed after PMFW
2604 	 * is loaded and before other non-PSP firmware is loaded.
2605 	 */
2606 	if (psp->pmfw_centralized_cstate_management) {
2607 		ret = psp_load_smu_fw(psp);
2608 		if (ret)
2609 			return ret;
2610 	}
2611 
2612 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2613 		ret = psp_tmr_load(psp);
2614 		if (ret) {
2615 			dev_err(adev->dev, "PSP load tmr failed!\n");
2616 			return ret;
2617 		}
2618 	}
2619 
2620 	return 0;
2621 }
2622 
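/* Map an amdgpu ucode id to the PSP GFX firmware type used for loading. */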
2623 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2624 			   enum psp_gfx_fw_type *type)
2625 {
2626 	switch (ucode->ucode_id) {
2627 	case AMDGPU_UCODE_ID_CAP:
2628 		*type = GFX_FW_TYPE_CAP;
2629 		break;
2630 	case AMDGPU_UCODE_ID_SDMA0:
2631 		*type = GFX_FW_TYPE_SDMA0;
2632 		break;
2633 	case AMDGPU_UCODE_ID_SDMA1:
2634 		*type = GFX_FW_TYPE_SDMA1;
2635 		break;
2636 	case AMDGPU_UCODE_ID_SDMA2:
2637 		*type = GFX_FW_TYPE_SDMA2;
2638 		break;
2639 	case AMDGPU_UCODE_ID_SDMA3:
2640 		*type = GFX_FW_TYPE_SDMA3;
2641 		break;
2642 	case AMDGPU_UCODE_ID_SDMA4:
2643 		*type = GFX_FW_TYPE_SDMA4;
2644 		break;
2645 	case AMDGPU_UCODE_ID_SDMA5:
2646 		*type = GFX_FW_TYPE_SDMA5;
2647 		break;
2648 	case AMDGPU_UCODE_ID_SDMA6:
2649 		*type = GFX_FW_TYPE_SDMA6;
2650 		break;
2651 	case AMDGPU_UCODE_ID_SDMA7:
2652 		*type = GFX_FW_TYPE_SDMA7;
2653 		break;
2654 	case AMDGPU_UCODE_ID_CP_MES:
2655 		*type = GFX_FW_TYPE_CP_MES;
2656 		break;
2657 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2658 		*type = GFX_FW_TYPE_MES_STACK;
2659 		break;
2660 	case AMDGPU_UCODE_ID_CP_MES1:
2661 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2662 		break;
2663 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2664 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2665 		break;
2666 	case AMDGPU_UCODE_ID_CP_CE:
2667 		*type = GFX_FW_TYPE_CP_CE;
2668 		break;
2669 	case AMDGPU_UCODE_ID_CP_PFP:
2670 		*type = GFX_FW_TYPE_CP_PFP;
2671 		break;
2672 	case AMDGPU_UCODE_ID_CP_ME:
2673 		*type = GFX_FW_TYPE_CP_ME;
2674 		break;
2675 	case AMDGPU_UCODE_ID_CP_MEC1:
2676 		*type = GFX_FW_TYPE_CP_MEC;
2677 		break;
2678 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2679 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2680 		break;
2681 	case AMDGPU_UCODE_ID_CP_MEC2:
2682 		*type = GFX_FW_TYPE_CP_MEC;
2683 		break;
2684 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2685 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2686 		break;
2687 	case AMDGPU_UCODE_ID_RLC_P:
2688 		*type = GFX_FW_TYPE_RLC_P;
2689 		break;
2690 	case AMDGPU_UCODE_ID_RLC_V:
2691 		*type = GFX_FW_TYPE_RLC_V;
2692 		break;
2693 	case AMDGPU_UCODE_ID_RLC_G:
2694 		*type = GFX_FW_TYPE_RLC_G;
2695 		break;
2696 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2697 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2698 		break;
2699 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2700 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2701 		break;
2702 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2703 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2704 		break;
2705 	case AMDGPU_UCODE_ID_RLC_IRAM:
2706 		*type = GFX_FW_TYPE_RLC_IRAM;
2707 		break;
2708 	case AMDGPU_UCODE_ID_RLC_DRAM:
2709 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2710 		break;
2711 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2712 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2713 		break;
2714 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2715 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2716 		break;
2717 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2718 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2719 		break;
2720 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2721 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2722 		break;
2723 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2724 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2725 		break;
2726 	case AMDGPU_UCODE_ID_SMC:
2727 		*type = GFX_FW_TYPE_SMU;
2728 		break;
2729 	case AMDGPU_UCODE_ID_PPTABLE:
2730 		*type = GFX_FW_TYPE_PPTABLE;
2731 		break;
2732 	case AMDGPU_UCODE_ID_UVD:
2733 		*type = GFX_FW_TYPE_UVD;
2734 		break;
2735 	case AMDGPU_UCODE_ID_UVD1:
2736 		*type = GFX_FW_TYPE_UVD1;
2737 		break;
2738 	case AMDGPU_UCODE_ID_VCE:
2739 		*type = GFX_FW_TYPE_VCE;
2740 		break;
2741 	case AMDGPU_UCODE_ID_VCN:
2742 		*type = GFX_FW_TYPE_VCN;
2743 		break;
2744 	case AMDGPU_UCODE_ID_VCN1:
2745 		*type = GFX_FW_TYPE_VCN1;
2746 		break;
2747 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2748 		*type = GFX_FW_TYPE_DMCU_ERAM;
2749 		break;
2750 	case AMDGPU_UCODE_ID_DMCU_INTV:
2751 		*type = GFX_FW_TYPE_DMCU_ISR;
2752 		break;
2753 	case AMDGPU_UCODE_ID_VCN0_RAM:
2754 		*type = GFX_FW_TYPE_VCN0_RAM;
2755 		break;
2756 	case AMDGPU_UCODE_ID_VCN1_RAM:
2757 		*type = GFX_FW_TYPE_VCN1_RAM;
2758 		break;
2759 	case AMDGPU_UCODE_ID_DMCUB:
2760 		*type = GFX_FW_TYPE_DMUB;
2761 		break;
2762 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2763 	case AMDGPU_UCODE_ID_SDMA_RS64:
2764 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2765 		break;
2766 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2767 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2768 		break;
2769 	case AMDGPU_UCODE_ID_IMU_I:
2770 		*type = GFX_FW_TYPE_IMU_I;
2771 		break;
2772 	case AMDGPU_UCODE_ID_IMU_D:
2773 		*type = GFX_FW_TYPE_IMU_D;
2774 		break;
2775 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2776 		*type = GFX_FW_TYPE_RS64_PFP;
2777 		break;
2778 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2779 		*type = GFX_FW_TYPE_RS64_ME;
2780 		break;
2781 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2782 		*type = GFX_FW_TYPE_RS64_MEC;
2783 		break;
2784 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2785 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2786 		break;
2787 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2788 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2789 		break;
2790 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2791 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2792 		break;
2793 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2794 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2795 		break;
2796 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2797 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2798 		break;
2799 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2800 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2801 		break;
2802 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2803 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2804 		break;
2805 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2806 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2807 		break;
2808 	case AMDGPU_UCODE_ID_VPE_CTX:
2809 		*type = GFX_FW_TYPE_VPEC_FW1;
2810 		break;
2811 	case AMDGPU_UCODE_ID_VPE_CTL:
2812 		*type = GFX_FW_TYPE_VPEC_FW2;
2813 		break;
2814 	case AMDGPU_UCODE_ID_VPE:
2815 		*type = GFX_FW_TYPE_VPE;
2816 		break;
2817 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2818 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2819 		break;
2820 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2821 		*type = GFX_FW_TYPE_UMSCH_DATA;
2822 		break;
2823 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2824 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2825 		break;
2826 	case AMDGPU_UCODE_ID_P2S_TABLE:
2827 		*type = GFX_FW_TYPE_P2S_TABLE;
2828 		break;
2829 	case AMDGPU_UCODE_ID_JPEG_RAM:
2830 		*type = GFX_FW_TYPE_JPEG_RAM;
2831 		break;
2832 	case AMDGPU_UCODE_ID_ISP:
2833 		*type = GFX_FW_TYPE_ISP;
2834 		break;
2835 	case AMDGPU_UCODE_ID_MAXIMUM:
2836 	default:
2837 		return -EINVAL;
2838 	}
2839 
2840 	return 0;
2841 }
2842 
2843 static void psp_print_fw_hdr(struct psp_context *psp,
2844 			     struct amdgpu_firmware_info *ucode)
2845 {
2846 	struct amdgpu_device *adev = psp->adev;
2847 	struct common_firmware_header *hdr;
2848 
2849 	switch (ucode->ucode_id) {
2850 	case AMDGPU_UCODE_ID_SDMA0:
2851 	case AMDGPU_UCODE_ID_SDMA1:
2852 	case AMDGPU_UCODE_ID_SDMA2:
2853 	case AMDGPU_UCODE_ID_SDMA3:
2854 	case AMDGPU_UCODE_ID_SDMA4:
2855 	case AMDGPU_UCODE_ID_SDMA5:
2856 	case AMDGPU_UCODE_ID_SDMA6:
2857 	case AMDGPU_UCODE_ID_SDMA7:
2858 		hdr = (struct common_firmware_header *)
2859 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2860 		amdgpu_ucode_print_sdma_hdr(hdr);
2861 		break;
2862 	case AMDGPU_UCODE_ID_CP_CE:
2863 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2864 		amdgpu_ucode_print_gfx_hdr(hdr);
2865 		break;
2866 	case AMDGPU_UCODE_ID_CP_PFP:
2867 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2868 		amdgpu_ucode_print_gfx_hdr(hdr);
2869 		break;
2870 	case AMDGPU_UCODE_ID_CP_ME:
2871 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2872 		amdgpu_ucode_print_gfx_hdr(hdr);
2873 		break;
2874 	case AMDGPU_UCODE_ID_CP_MEC1:
2875 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2876 		amdgpu_ucode_print_gfx_hdr(hdr);
2877 		break;
2878 	case AMDGPU_UCODE_ID_RLC_G:
2879 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2880 		amdgpu_ucode_print_rlc_hdr(hdr);
2881 		break;
2882 	case AMDGPU_UCODE_ID_SMC:
2883 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2884 		amdgpu_ucode_print_smc_hdr(hdr);
2885 		break;
2886 	default:
2887 		break;
2888 	}
2889 }
2890 
2891 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2892 				       struct amdgpu_firmware_info *ucode,
2893 				       struct psp_gfx_cmd_resp *cmd)
2894 {
2895 	int ret;
2896 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2897 
2898 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2899 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2900 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2901 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2902 
2903 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2904 	if (ret)
2905 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2906 
2907 	return ret;
2908 }
2909 
2910 int psp_execute_ip_fw_load(struct psp_context *psp,
2911 			   struct amdgpu_firmware_info *ucode)
2912 {
2913 	int ret = 0;
2914 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2915 
2916 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2917 	if (!ret) {
2918 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2919 					 psp->fence_buf_mc_addr);
2920 	}
2921 
2922 	release_psp_cmd_buf(psp);
2923 
2924 	return ret;
2925 }
2926 
2927 static int psp_load_p2s_table(struct psp_context *psp)
2928 {
2929 	int ret;
2930 	struct amdgpu_device *adev = psp->adev;
2931 	struct amdgpu_firmware_info *ucode =
2932 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2933 
2934 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2935 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2936 		return 0;
2937 
2938 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2939 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2940 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2941 								0x0036003C;
2942 		if (psp->sos.fw_version < supp_vers)
2943 			return 0;
2944 	}
2945 
2946 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2947 		return 0;
2948 
2949 	ret = psp_execute_ip_fw_load(psp, ucode);
2950 
2951 	return ret;
2952 }
2953 
2954 static int psp_load_smu_fw(struct psp_context *psp)
2955 {
2956 	int ret;
2957 	struct amdgpu_device *adev = psp->adev;
2958 	struct amdgpu_firmware_info *ucode =
2959 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2960 	struct amdgpu_ras *ras = psp->ras_context.ras;
2961 
2962 	/*
2963 	 * Skip SMU FW reloading when BACO is used for runtime PM only,
2964 	 * as the SMU is always alive.
2965 	 */
2966 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2967 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2968 		return 0;
2969 
2970 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2971 		return 0;
2972 
2973 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2974 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2975 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2976 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2977 		if (ret)
2978 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2979 	}
2980 
2981 	ret = psp_execute_ip_fw_load(psp, ucode);
2982 
2983 	if (ret)
2984 		dev_err(adev->dev, "PSP load smu failed!\n");
2985 
2986 	return ret;
2987 }
2988 
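/*
 * Decide whether a firmware image should be skipped by the generic
 * loader: empty images, the P2S table, SMC when it is handled
 * separately, images on the SR-IOV skip list, and MEC JT images when
 * autoload is enabled.
 */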
2989 static bool fw_load_skip_check(struct psp_context *psp,
2990 			       struct amdgpu_firmware_info *ucode)
2991 {
2992 	if (!ucode->fw || !ucode->ucode_size)
2993 		return true;
2994 
2995 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2996 		return true;
2997 
2998 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2999 	    (psp_smu_reload_quirk(psp) ||
3000 	     psp->autoload_supported ||
3001 	     psp->pmfw_centralized_cstate_management))
3002 		return true;
3003 
3004 	if (amdgpu_sriov_vf(psp->adev) &&
3005 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
3006 		return true;
3007 
3008 	if (psp->autoload_supported &&
3009 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
3010 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
3011 		/* skip mec JT when autoload is enabled */
3012 		return true;
3013 
3014 	return false;
3015 }
3016 
3017 int psp_load_fw_list(struct psp_context *psp,
3018 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
3019 {
3020 	int ret = 0, i;
3021 	struct amdgpu_firmware_info *ucode;
3022 
3023 	for (i = 0; i < ucode_count; ++i) {
3024 		ucode = ucode_list[i];
3025 		psp_print_fw_hdr(psp, ucode);
3026 		ret = psp_execute_ip_fw_load(psp, ucode);
3027 		if (ret)
3028 			return ret;
3029 	}
3030 	return ret;
3031 }
3032 
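/*
 * Load all non-PSP firmware images through the PSP, honoring the skip
 * rules above, and start RLC autoload once the last GFX image has been
 * submitted.
 */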
3033 static int psp_load_non_psp_fw(struct psp_context *psp)
3034 {
3035 	int i, ret;
3036 	struct amdgpu_firmware_info *ucode;
3037 	struct amdgpu_device *adev = psp->adev;
3038 
3039 	if (psp->autoload_supported &&
3040 	    !psp->pmfw_centralized_cstate_management) {
3041 		ret = psp_load_smu_fw(psp);
3042 		if (ret)
3043 			return ret;
3044 	}
3045 
3046 	/* Load P2S table first if it's available */
3047 	psp_load_p2s_table(psp);
3048 
3049 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
3050 		ucode = &adev->firmware.ucode[i];
3051 
3052 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3053 		    !fw_load_skip_check(psp, ucode)) {
3054 			ret = psp_load_smu_fw(psp);
3055 			if (ret)
3056 				return ret;
3057 			continue;
3058 		}
3059 
3060 		if (fw_load_skip_check(psp, ucode))
3061 			continue;
3062 
3063 		if (psp->autoload_supported &&
3064 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3065 			     IP_VERSION(11, 0, 7) ||
3066 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3067 			     IP_VERSION(11, 0, 11) ||
3068 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3069 			     IP_VERSION(11, 0, 12)) &&
3070 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
3071 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
3072 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
3073 			/* PSP only receives one SDMA firmware for sienna_cichlid,
3074 			 * as all four SDMA firmwares are the same
3075 			 */
3076 			continue;
3077 
3078 		psp_print_fw_hdr(psp, ucode);
3079 
3080 		ret = psp_execute_ip_fw_load(psp, ucode);
3081 		if (ret)
3082 			return ret;
3083 
3084 		/* Start RLC autoload after PSP has received all the GFX firmware */
3085 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
3086 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
3087 			ret = psp_rlc_autoload_start(psp);
3088 			if (ret) {
3089 				dev_err(adev->dev, "Failed to start rlc autoload\n");
3090 				return ret;
3091 			}
3092 		}
3093 	}
3094 
3095 	return 0;
3096 }
3097 
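/*
 * Full firmware load path used at hw_init: start the PSP, load the
 * non-PSP firmware, initialize ASD and load the RL image, then bring up
 * the optional TAs (RAS/HDCP/DTM/RAP/securedisplay) when TA firmware is
 * present.
 */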
3098 static int psp_load_fw(struct amdgpu_device *adev)
3099 {
3100 	int ret;
3101 	struct psp_context *psp = &adev->psp;
3102 
3103 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3104 		/* do not destroy the ring, only stop it */
3105 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
3106 	} else {
3107 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
3108 
3109 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
3110 		if (ret) {
3111 			dev_err(adev->dev, "PSP ring init failed!\n");
3112 			goto failed;
3113 		}
3114 	}
3115 
3116 	ret = psp_hw_start(psp);
3117 	if (ret)
3118 		goto failed;
3119 
3120 	ret = psp_load_non_psp_fw(psp);
3121 	if (ret)
3122 		goto failed1;
3123 
3124 	ret = psp_asd_initialize(psp);
3125 	if (ret) {
3126 		dev_err(adev->dev, "PSP load asd failed!\n");
3127 		goto failed1;
3128 	}
3129 
3130 	ret = psp_rl_load(adev);
3131 	if (ret) {
3132 		dev_err(adev->dev, "PSP load RL failed!\n");
3133 		goto failed1;
3134 	}
3135 
3136 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3137 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
3138 			ret = psp_xgmi_initialize(psp, false, true);
3139 			/* Warn about an XGMI session initialization failure
3140 			 * instead of stopping driver initialization
3141 			 */
3142 			if (ret)
3143 				dev_err(psp->adev->dev,
3144 					"XGMI: Failed to initialize XGMI session\n");
3145 		}
3146 	}
3147 
3148 	if (psp->ta_fw) {
3149 		ret = psp_ras_initialize(psp);
3150 		if (ret)
3151 			dev_err(psp->adev->dev,
3152 				"RAS: Failed to initialize RAS\n");
3153 
3154 		ret = psp_hdcp_initialize(psp);
3155 		if (ret)
3156 			dev_err(psp->adev->dev,
3157 				"HDCP: Failed to initialize HDCP\n");
3158 
3159 		ret = psp_dtm_initialize(psp);
3160 		if (ret)
3161 			dev_err(psp->adev->dev,
3162 				"DTM: Failed to initialize DTM\n");
3163 
3164 		ret = psp_rap_initialize(psp);
3165 		if (ret)
3166 			dev_err(psp->adev->dev,
3167 				"RAP: Failed to initialize RAP\n");
3168 
3169 		ret = psp_securedisplay_initialize(psp);
3170 		if (ret)
3171 			dev_err(psp->adev->dev,
3172 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3173 	}
3174 
3175 	return 0;
3176 
3177 failed1:
3178 	psp_free_shared_bufs(psp);
3179 failed:
3180 	/*
3181 	 * All cleanup jobs (xgmi terminate, ras terminate,
3182 	 * ring destroy, cmd/fence/fw buffer destroy,
3183 	 * psp->cmd destroy) are delayed to psp_hw_fini
3184 	 */
3185 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3186 	return ret;
3187 }
3188 
3189 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3190 {
3191 	int ret;
3192 	struct amdgpu_device *adev = ip_block->adev;
3193 
3194 	mutex_lock(&adev->firmware.mutex);
3195 
3196 	ret = amdgpu_ucode_init_bo(adev);
3197 	if (ret)
3198 		goto failed;
3199 
3200 	ret = psp_load_fw(adev);
3201 	if (ret) {
3202 		dev_err(adev->dev, "PSP firmware loading failed\n");
3203 		goto failed;
3204 	}
3205 
3206 	mutex_unlock(&adev->firmware.mutex);
3207 	return 0;
3208 
3209 failed:
3210 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3211 	mutex_unlock(&adev->firmware.mutex);
3212 	return -EINVAL;
3213 }
3214 
3215 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3216 {
3217 	struct amdgpu_device *adev = ip_block->adev;
3218 	struct psp_context *psp = &adev->psp;
3219 
3220 	if (psp->ta_fw) {
3221 		psp_ras_terminate(psp);
3222 		psp_securedisplay_terminate(psp);
3223 		psp_rap_terminate(psp);
3224 		psp_dtm_terminate(psp);
3225 		psp_hdcp_terminate(psp);
3226 
3227 		if (adev->gmc.xgmi.num_physical_nodes > 1)
3228 			psp_xgmi_terminate(psp);
3229 	}
3230 
3231 	psp_asd_terminate(psp);
3232 	psp_tmr_terminate(psp);
3233 
3234 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3235 
3236 	return 0;
3237 }
3238 
3239 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3240 {
3241 	int ret = 0;
3242 	struct amdgpu_device *adev = ip_block->adev;
3243 	struct psp_context *psp = &adev->psp;
3244 
3245 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3246 	    psp->xgmi_context.context.initialized) {
3247 		ret = psp_xgmi_terminate(psp);
3248 		if (ret) {
3249 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3250 			goto out;
3251 		}
3252 	}
3253 
3254 	if (psp->ta_fw) {
3255 		ret = psp_ras_terminate(psp);
3256 		if (ret) {
3257 			dev_err(adev->dev, "Failed to terminate ras ta\n");
3258 			goto out;
3259 		}
3260 		ret = psp_hdcp_terminate(psp);
3261 		if (ret) {
3262 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3263 			goto out;
3264 		}
3265 		ret = psp_dtm_terminate(psp);
3266 		if (ret) {
3267 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3268 			goto out;
3269 		}
3270 		ret = psp_rap_terminate(psp);
3271 		if (ret) {
3272 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3273 			goto out;
3274 		}
3275 		ret = psp_securedisplay_terminate(psp);
3276 		if (ret) {
3277 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3278 			goto out;
3279 		}
3280 	}
3281 
3282 	ret = psp_asd_terminate(psp);
3283 	if (ret) {
3284 		dev_err(adev->dev, "Failed to terminate asd\n");
3285 		goto out;
3286 	}
3287 
3288 	ret = psp_tmr_terminate(psp);
3289 	if (ret) {
3290 		dev_err(adev->dev, "Failed to terminate tmr\n");
3291 		goto out;
3292 	}
3293 
3294 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3295 	if (ret)
3296 		dev_err(adev->dev, "PSP ring stop failed\n");
3297 
3298 out:
3299 	return ret;
3300 }
3301 
3302 static int psp_resume(struct amdgpu_ip_block *ip_block)
3303 {
3304 	int ret;
3305 	struct amdgpu_device *adev = ip_block->adev;
3306 	struct psp_context *psp = &adev->psp;
3307 
3308 	dev_info(adev->dev, "PSP is resuming...\n");
3309 
3310 	if (psp->mem_train_ctx.enable_mem_training) {
3311 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3312 		if (ret) {
3313 			dev_err(adev->dev, "Failed to process memory training!\n");
3314 			return ret;
3315 		}
3316 	}
3317 
3318 	mutex_lock(&adev->firmware.mutex);
3319 
3320 	ret = amdgpu_ucode_init_bo(adev);
3321 	if (ret)
3322 		goto failed;
3323 
3324 	ret = psp_hw_start(psp);
3325 	if (ret)
3326 		goto failed;
3327 
3328 	ret = psp_load_non_psp_fw(psp);
3329 	if (ret)
3330 		goto failed;
3331 
3332 	ret = psp_asd_initialize(psp);
3333 	if (ret) {
3334 		dev_err(adev->dev, "PSP load asd failed!\n");
3335 		goto failed;
3336 	}
3337 
3338 	ret = psp_rl_load(adev);
3339 	if (ret) {
3340 		dev_err(adev->dev, "PSP load RL failed!\n");
3341 		goto failed;
3342 	}
3343 
3344 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3345 		ret = psp_xgmi_initialize(psp, false, true);
3346 		/* Warn about an XGMI session initialization failure
3347 		 * instead of stopping driver initialization
3348 		 */
3349 		if (ret)
3350 			dev_err(psp->adev->dev,
3351 				"XGMI: Failed to initialize XGMI session\n");
3352 	}
3353 
3354 	if (psp->ta_fw) {
3355 		ret = psp_ras_initialize(psp);
3356 		if (ret)
3357 			dev_err(psp->adev->dev,
3358 				"RAS: Failed to initialize RAS\n");
3359 
3360 		ret = psp_hdcp_initialize(psp);
3361 		if (ret)
3362 			dev_err(psp->adev->dev,
3363 				"HDCP: Failed to initialize HDCP\n");
3364 
3365 		ret = psp_dtm_initialize(psp);
3366 		if (ret)
3367 			dev_err(psp->adev->dev,
3368 				"DTM: Failed to initialize DTM\n");
3369 
3370 		ret = psp_rap_initialize(psp);
3371 		if (ret)
3372 			dev_err(psp->adev->dev,
3373 				"RAP: Failed to initialize RAP\n");
3374 
3375 		ret = psp_securedisplay_initialize(psp);
3376 		if (ret)
3377 			dev_err(psp->adev->dev,
3378 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3379 	}
3380 
3381 	mutex_unlock(&adev->firmware.mutex);
3382 
3383 	return 0;
3384 
3385 failed:
3386 	dev_err(adev->dev, "PSP resume failed\n");
3387 	mutex_unlock(&adev->firmware.mutex);
3388 	return ret;
3389 }
3390 
3391 int psp_gpu_reset(struct amdgpu_device *adev)
3392 {
3393 	int ret;
3394 
3395 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3396 		return 0;
3397 
3398 	mutex_lock(&adev->psp.mutex);
3399 	ret = psp_mode1_reset(&adev->psp);
3400 	mutex_unlock(&adev->psp.mutex);
3401 
3402 	return ret;
3403 }
3404 
3405 int psp_rlc_autoload_start(struct psp_context *psp)
3406 {
3407 	int ret;
3408 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3409 
3410 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3411 
3412 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3413 				 psp->fence_buf_mc_addr);
3414 
3415 	release_psp_cmd_buf(psp);
3416 
3417 	return ret;
3418 }
3419 
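/*
 * Write a GPCOM ring buffer frame with the given command and fence
 * addresses at the current write pointer, then advance the write
 * pointer (in DWORDs) so the PSP picks the frame up.
 */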
3420 int psp_ring_cmd_submit(struct psp_context *psp,
3421 			uint64_t cmd_buf_mc_addr,
3422 			uint64_t fence_mc_addr,
3423 			int index)
3424 {
3425 	unsigned int psp_write_ptr_reg = 0;
3426 	struct psp_gfx_rb_frame *write_frame;
3427 	struct psp_ring *ring = &psp->km_ring;
3428 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3429 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3430 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3431 	struct amdgpu_device *adev = psp->adev;
3432 	uint32_t ring_size_dw = ring->ring_size / 4;
3433 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3434 
3435 	/* KM (GPCOM) prepare write pointer */
3436 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3437 
3438 	/* Update KM RB frame pointer to new frame */
3439 	/* write_frame ptr increments by size of rb_frame in bytes */
3440 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3441 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3442 		write_frame = ring_buffer_start;
3443 	else
3444 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3445 	/* Check invalid write_frame ptr address */
3446 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3447 		dev_err(adev->dev,
3448 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3449 			ring_buffer_start, ring_buffer_end, write_frame);
3450 		dev_err(adev->dev,
3451 			"write_frame is pointing to address out of bounds\n");
3452 		return -EINVAL;
3453 	}
3454 
3455 	/* Initialize KM RB frame */
3456 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3457 
3458 	/* Update KM RB frame */
3459 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3460 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3461 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3462 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3463 	write_frame->fence_value = index;
3464 	amdgpu_device_flush_hdp(adev, NULL);
3465 
3466 	/* Update the write Pointer in DWORDs */
3467 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3468 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3469 	return 0;
3470 }
3471 
3472 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3473 {
3474 	struct amdgpu_device *adev = psp->adev;
3475 	const struct psp_firmware_header_v1_0 *asd_hdr;
3476 	int err = 0;
3477 
3478 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3479 				   "amdgpu/%s_asd.bin", chip_name);
3480 	if (err)
3481 		goto out;
3482 
3483 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3484 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3485 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3486 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3487 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3488 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3489 	return 0;
3490 out:
3491 	amdgpu_ucode_release(&adev->psp.asd_fw);
3492 	return err;
3493 }
3494 
3495 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3496 {
3497 	struct amdgpu_device *adev = psp->adev;
3498 	const struct psp_firmware_header_v1_0 *toc_hdr;
3499 	int err = 0;
3500 
3501 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3502 				   "amdgpu/%s_toc.bin", chip_name);
3503 	if (err)
3504 		goto out;
3505 
3506 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3507 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3508 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3509 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3510 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3511 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3512 	return 0;
3513 out:
3514 	amdgpu_ucode_release(&adev->psp.toc_fw);
3515 	return err;
3516 }
3517 
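/*
 * Copy version, size and start address from one v2 SOS binary
 * descriptor into the matching psp_context firmware slot.
 */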
3518 static int parse_sos_bin_descriptor(struct psp_context *psp,
3519 				   const struct psp_fw_bin_desc *desc,
3520 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3521 {
3522 	uint8_t *ucode_start_addr  = NULL;
3523 
3524 	if (!psp || !desc || !sos_hdr)
3525 		return -EINVAL;
3526 
3527 	ucode_start_addr  = (uint8_t *)sos_hdr +
3528 			    le32_to_cpu(desc->offset_bytes) +
3529 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3530 
3531 	switch (desc->fw_type) {
3532 	case PSP_FW_TYPE_PSP_SOS:
3533 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3534 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3535 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3536 		psp->sos.start_addr	   = ucode_start_addr;
3537 		break;
3538 	case PSP_FW_TYPE_PSP_SYS_DRV:
3539 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3540 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3541 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3542 		psp->sys.start_addr        = ucode_start_addr;
3543 		break;
3544 	case PSP_FW_TYPE_PSP_KDB:
3545 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3546 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3547 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3548 		psp->kdb.start_addr        = ucode_start_addr;
3549 		break;
3550 	case PSP_FW_TYPE_PSP_TOC:
3551 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3552 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3553 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3554 		psp->toc.start_addr        = ucode_start_addr;
3555 		break;
3556 	case PSP_FW_TYPE_PSP_SPL:
3557 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3558 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3559 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3560 		psp->spl.start_addr        = ucode_start_addr;
3561 		break;
3562 	case PSP_FW_TYPE_PSP_RL:
3563 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3564 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3565 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3566 		psp->rl.start_addr         = ucode_start_addr;
3567 		break;
3568 	case PSP_FW_TYPE_PSP_SOC_DRV:
3569 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3570 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3571 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3572 		psp->soc_drv.start_addr         = ucode_start_addr;
3573 		break;
3574 	case PSP_FW_TYPE_PSP_INTF_DRV:
3575 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3576 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3577 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3578 		psp->intf_drv.start_addr        = ucode_start_addr;
3579 		break;
3580 	case PSP_FW_TYPE_PSP_DBG_DRV:
3581 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3582 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3583 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3584 		psp->dbg_drv.start_addr         = ucode_start_addr;
3585 		break;
3586 	case PSP_FW_TYPE_PSP_RAS_DRV:
3587 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3588 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3589 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3590 		psp->ras_drv.start_addr         = ucode_start_addr;
3591 		break;
3592 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3593 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3594 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3595 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3596 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3597 		break;
3598 	case PSP_FW_TYPE_PSP_SPDM_DRV:
3599 		psp->spdm_drv.fw_version	= le32_to_cpu(desc->fw_version);
3600 		psp->spdm_drv.feature_version	= le32_to_cpu(desc->fw_version);
3601 		psp->spdm_drv.size_bytes	= le32_to_cpu(desc->size_bytes);
3602 		psp->spdm_drv.start_addr	= ucode_start_addr;
3603 		break;
3604 	default:
3605 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3606 		break;
3607 	}
3608 
3609 	return 0;
3610 }
3611 
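/*
 * Set up the legacy (v1.x header) SYS_DRV and SOS regions. For MP0 13.0.2
 * parts that are not connected to the CPU over xGMI, the alternate
 * sys_drv_aux/sos_aux images from the v1.3 header are used instead.
 */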
3612 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3613 {
3614 	const struct psp_firmware_header_v1_0 *sos_hdr;
3615 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3616 	uint8_t *ucode_array_start_addr;
3617 
3618 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3619 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3620 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3621 
3622 	if (adev->gmc.xgmi.connected_to_cpu ||
3623 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3624 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3625 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3626 
3627 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3628 		adev->psp.sys.start_addr = ucode_array_start_addr;
3629 
3630 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3631 		adev->psp.sos.start_addr = ucode_array_start_addr +
3632 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3633 	} else {
3634 		/* Load alternate PSP SOS FW */
3635 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3636 
3637 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3638 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3639 
3640 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3641 		adev->psp.sys.start_addr = ucode_array_start_addr +
3642 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3643 
3644 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3645 		adev->psp.sos.start_addr = ucode_array_start_addr +
3646 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3647 	}
3648 
3649 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3650 		dev_warn(adev->dev, "PSP SOS FW not available\n");
3651 		return -EINVAL;
3652 	}
3653 
3654 	return 0;
3655 }
3656 
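/*
 * Request amdgpu/<chip>_sos.bin (or the _sos_kicker variant) and parse it
 * based on the header major version: v1.x headers locate the toc/kdb/spl/rl
 * sections at fixed offsets depending on the minor version, while v2.x
 * headers describe a variable number of packed firmware binaries that are
 * handed to parse_sos_bin_descriptor().
 */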
3657 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3658 {
3659 	struct amdgpu_device *adev = psp->adev;
3660 	const struct psp_firmware_header_v1_0 *sos_hdr;
3661 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3662 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3663 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3664 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3665 	const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3666 	int fw_index, fw_bin_count, start_index = 0;
3667 	const struct psp_fw_bin_desc *fw_bin;
3668 	uint8_t *ucode_array_start_addr;
3669 	int err = 0;
3670 
3671 	if (amdgpu_is_kicker_fw(adev))
3672 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3673 					   "amdgpu/%s_sos_kicker.bin", chip_name);
3674 	else
3675 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3676 					   "amdgpu/%s_sos.bin", chip_name);
3677 	if (err)
3678 		goto out;
3679 
3680 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3681 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3682 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3683 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3684 
3685 	switch (sos_hdr->header.header_version_major) {
3686 	case 1:
3687 		err = psp_init_sos_base_fw(adev);
3688 		if (err)
3689 			goto out;
3690 
3691 		if (sos_hdr->header.header_version_minor == 1) {
3692 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3693 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3694 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3695 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3696 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3697 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3698 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3699 		}
3700 		if (sos_hdr->header.header_version_minor == 2) {
3701 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3702 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3703 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3704 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3705 		}
3706 		if (sos_hdr->header.header_version_minor == 3) {
3707 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3708 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3709 			adev->psp.toc.start_addr = ucode_array_start_addr +
3710 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3711 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3712 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3713 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3714 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3715 			adev->psp.spl.start_addr = ucode_array_start_addr +
3716 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3717 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3718 			adev->psp.rl.start_addr = ucode_array_start_addr +
3719 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3720 		}
3721 		break;
3722 	case 2:
3723 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3724 
3725 		fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3726 
3727 		if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3728 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3729 			err = -EINVAL;
3730 			goto out;
3731 		}
3732 
3733 		if (sos_hdr_v2_0->header.header_version_minor == 1) {
3734 			sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3735 
3736 			fw_bin = sos_hdr_v2_1->psp_fw_bin;
3737 
3738 			if (psp_is_aux_sos_load_required(psp))
3739 				start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3740 			else
3741 				fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3742 
3743 		} else {
3744 			fw_bin = sos_hdr_v2_0->psp_fw_bin;
3745 		}
3746 
3747 		for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3748 			err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3749 						       sos_hdr_v2_0);
3750 			if (err)
3751 				goto out;
3752 		}
3753 		break;
3754 	default:
3755 		dev_err(adev->dev,
3756 			"unsupported psp sos firmware\n");
3757 		err = -EINVAL;
3758 		goto out;
3759 	}
3760 
3761 	return 0;
3762 out:
3763 	amdgpu_ucode_release(&adev->psp.sos_fw);
3764 
3765 	return err;
3766 }
3767 
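/*
 * On MP0 13.0.6 the TA package carries both a regular and an AUX XGMI TA:
 * APUs with a new enough TA (low byte of the version >= 0x14) use the AUX
 * image, everything else keeps the regular one. All other TA types are
 * always applicable.
 */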
3768 static bool is_ta_fw_applicable(struct psp_context *psp,
3769 			     const struct psp_fw_bin_desc *desc)
3770 {
3771 	struct amdgpu_device *adev = psp->adev;
3772 	uint32_t fw_version;
3773 
3774 	switch (desc->fw_type) {
3775 	case TA_FW_TYPE_PSP_XGMI:
3776 	case TA_FW_TYPE_PSP_XGMI_AUX:
3777 		/* For now, the AUX TA only exists in the 13.0.6 TA binary,
3778 		 * starting from v20.00.0x.14.
3779 		 */
3780 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3781 		    IP_VERSION(13, 0, 6)) {
3782 			fw_version = le32_to_cpu(desc->fw_version);
3783 
3784 			if (adev->flags & AMD_IS_APU &&
3785 			    (fw_version & 0xff) >= 0x14)
3786 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3787 			else
3788 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3789 		}
3790 		break;
3791 	default:
3792 		break;
3793 	}
3794 
3795 	return true;
3796 }
3797 
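/*
 * Populate the per-TA bin_desc (version, size and start address) from one
 * descriptor of a v2.x TA header, skipping descriptors that
 * is_ta_fw_applicable() rejects for this device.
 */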
3798 static int parse_ta_bin_descriptor(struct psp_context *psp,
3799 				   const struct psp_fw_bin_desc *desc,
3800 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3801 {
3802 	uint8_t *ucode_start_addr  = NULL;
3803 
3804 	if (!psp || !desc || !ta_hdr)
3805 		return -EINVAL;
3806 
3807 	if (!is_ta_fw_applicable(psp, desc))
3808 		return 0;
3809 
3810 	ucode_start_addr  = (uint8_t *)ta_hdr +
3811 			    le32_to_cpu(desc->offset_bytes) +
3812 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3813 
3814 	switch (desc->fw_type) {
3815 	case TA_FW_TYPE_PSP_ASD:
3816 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3817 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3818 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3819 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3820 		break;
3821 	case TA_FW_TYPE_PSP_XGMI:
3822 	case TA_FW_TYPE_PSP_XGMI_AUX:
3823 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3824 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3825 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3826 		break;
3827 	case TA_FW_TYPE_PSP_RAS:
3828 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3829 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3830 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3831 		break;
3832 	case TA_FW_TYPE_PSP_HDCP:
3833 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3834 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3835 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3836 		break;
3837 	case TA_FW_TYPE_PSP_DTM:
3838 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3839 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3840 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3841 		break;
3842 	case TA_FW_TYPE_PSP_RAP:
3843 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3844 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3845 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3846 		break;
3847 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3848 		psp->securedisplay_context.context.bin_desc.fw_version =
3849 			le32_to_cpu(desc->fw_version);
3850 		psp->securedisplay_context.context.bin_desc.size_bytes =
3851 			le32_to_cpu(desc->size_bytes);
3852 		psp->securedisplay_context.context.bin_desc.start_addr =
3853 			ucode_start_addr;
3854 		break;
3855 	default:
3856 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3857 		break;
3858 	}
3859 
3860 	return 0;
3861 }
3862 
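/*
 * v1.0 TA headers locate each TA (XGMI, RAS, HDCP, DTM, SECUREDISPLAY) via
 * fixed header fields; note that RAS is addressed relative to the XGMI image
 * and DTM/SECUREDISPLAY relative to the HDCP image, as encoded in the header.
 */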
3863 static int parse_ta_v1_microcode(struct psp_context *psp)
3864 {
3865 	const struct ta_firmware_header_v1_0 *ta_hdr;
3866 	struct amdgpu_device *adev = psp->adev;
3867 
3868 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3869 
3870 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3871 		return -EINVAL;
3872 
3873 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3874 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3875 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3876 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3877 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3878 		(uint8_t *)ta_hdr +
3879 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3880 
3881 	adev->psp.ras_context.context.bin_desc.fw_version =
3882 		le32_to_cpu(ta_hdr->ras.fw_version);
3883 	adev->psp.ras_context.context.bin_desc.size_bytes =
3884 		le32_to_cpu(ta_hdr->ras.size_bytes);
3885 	adev->psp.ras_context.context.bin_desc.start_addr =
3886 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3887 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3888 
3889 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3890 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3891 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3892 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3893 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3894 		(uint8_t *)ta_hdr +
3895 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3896 
3897 	adev->psp.dtm_context.context.bin_desc.fw_version =
3898 		le32_to_cpu(ta_hdr->dtm.fw_version);
3899 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3900 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3901 	adev->psp.dtm_context.context.bin_desc.start_addr =
3902 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3903 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3904 
3905 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3906 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3907 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3908 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3909 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3910 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3911 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3912 
3913 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3914 
3915 	return 0;
3916 }
3917 
3918 static int parse_ta_v2_microcode(struct psp_context *psp)
3919 {
3920 	const struct ta_firmware_header_v2_0 *ta_hdr;
3921 	struct amdgpu_device *adev = psp->adev;
3922 	int err = 0;
3923 	int ta_index = 0;
3924 
3925 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3926 
3927 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3928 		return -EINVAL;
3929 
3930 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3931 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3932 		return -EINVAL;
3933 	}
3934 
3935 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3936 		err = parse_ta_bin_descriptor(psp,
3937 					      &ta_hdr->ta_fw_bin[ta_index],
3938 					      ta_hdr);
3939 		if (err)
3940 			return err;
3941 	}
3942 
3943 	return 0;
3944 }
3945 
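/*
 * Request amdgpu/<chip>_ta.bin (or the _ta_kicker variant) and dispatch to
 * the v1 or v2 parser based on the header major version.
 */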
3946 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3947 {
3948 	const struct common_firmware_header *hdr;
3949 	struct amdgpu_device *adev = psp->adev;
3950 	int err;
3951 
3952 	if (amdgpu_is_kicker_fw(adev))
3953 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3954 					   "amdgpu/%s_ta_kicker.bin", chip_name);
3955 	else
3956 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3957 					   "amdgpu/%s_ta.bin", chip_name);
3958 	if (err)
3959 		return err;
3960 
3961 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3962 	switch (le16_to_cpu(hdr->header_version_major)) {
3963 	case 1:
3964 		err = parse_ta_v1_microcode(psp);
3965 		break;
3966 	case 2:
3967 		err = parse_ta_v2_microcode(psp);
3968 		break;
3969 	default:
3970 		dev_err(adev->dev, "unsupported TA header version\n");
3971 		err = -EINVAL;
3972 	}
3973 
3974 	if (err)
3975 		amdgpu_ucode_release(&adev->psp.ta_fw);
3976 
3977 	return err;
3978 }
3979 
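/*
 * The CAP firmware is SRIOV-only and optional: a missing
 * amdgpu/<chip>_cap.bin only triggers a warning and is not treated as an
 * error.
 */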
3980 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3981 {
3982 	struct amdgpu_device *adev = psp->adev;
3983 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3984 	struct amdgpu_firmware_info *info = NULL;
3985 	int err = 0;
3986 
3987 	if (!amdgpu_sriov_vf(adev)) {
3988 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3989 		return -EINVAL;
3990 	}
3991 
3992 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
3993 				   "amdgpu/%s_cap.bin", chip_name);
3994 	if (err) {
3995 		if (err == -ENODEV) {
3996 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3997 			err = 0;
3998 		} else {
3999 			dev_err(adev->dev, "failed to initialize cap microcode\n");
4000 		}
4001 		goto out;
4002 	}
4003 
4004 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
4005 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
4006 	info->fw = adev->psp.cap_fw;
4007 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
4008 		adev->psp.cap_fw->data;
4009 	adev->firmware.fw_size += ALIGN(
4010 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
4011 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
4012 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
4013 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
4014 
4015 	return 0;
4016 
4017 out:
4018 	amdgpu_ucode_release(&adev->psp.cap_fw);
4019 	return err;
4020 }
4021 
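/*
 * Send a GFX_CMD_ID_CONFIG_SQ_PERFMON command for one XCP, toggling the SQ
 * core/reg/perfmon overrides. This is a no-op under SRIOV and is only
 * accepted on MP0 13.0.6.
 */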
4022 int psp_config_sq_perfmon(struct psp_context *psp,
4023 		uint32_t xcp_id, bool core_override_enable,
4024 		bool reg_override_enable, bool perfmon_override_enable)
4025 {
4026 	int ret;
4027 
4028 	if (amdgpu_sriov_vf(psp->adev))
4029 		return 0;
4030 
4031 	if (xcp_id >= MAX_XCP) {
4032 		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
4033 		return -EINVAL;
4034 	}
4035 
4036 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
4037 		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
4038 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
4039 		return -EINVAL;
4040 	}
4041 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
4042 
4043 	cmd->cmd_id	=	GFX_CMD_ID_CONFIG_SQ_PERFMON;
4044 	cmd->cmd.config_sq_perfmon.gfx_xcp_mask	=	BIT_MASK(xcp_id);
4045 	cmd->cmd.config_sq_perfmon.core_override	=	core_override_enable;
4046 	cmd->cmd.config_sq_perfmon.reg_override	=	reg_override_enable;
4047 	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
4048 
4049 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
4050 	if (ret)
4051 		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
4052 			xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
4053 
4054 	release_psp_cmd_buf(psp);
4055 	return ret;
4056 }
4057 
4058 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
4059 					enum amd_clockgating_state state)
4060 {
4061 	return 0;
4062 }
4063 
4064 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
4065 				     enum amd_powergating_state state)
4066 {
4067 	return 0;
4068 }
4069 
4070 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
4071 					 struct device_attribute *attr,
4072 					 char *buf)
4073 {
4074 	struct drm_device *ddev = dev_get_drvdata(dev);
4075 	struct amdgpu_device *adev = drm_to_adev(ddev);
4076 	struct amdgpu_ip_block *ip_block;
4077 	uint32_t fw_ver;
4078 	int ret;
4079 
4080 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4081 	if (!ip_block || !ip_block->status.late_initialized) {
4082 		dev_info(adev->dev, "PSP block is not ready yet.\n");
4083 		return -EBUSY;
4084 	}
4085 
4086 	mutex_lock(&adev->psp.mutex);
4087 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
4088 	mutex_unlock(&adev->psp.mutex);
4089 
4090 	if (ret) {
4091 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
4092 		return ret;
4093 	}
4094 
4095 	return sysfs_emit(buf, "%x\n", fw_ver);
4096 }
4097 
4098 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
4099 						       struct device_attribute *attr,
4100 						       const char *buf,
4101 						       size_t count)
4102 {
4103 	struct drm_device *ddev = dev_get_drvdata(dev);
4104 	struct amdgpu_device *adev = drm_to_adev(ddev);
4105 	int ret, idx;
4106 	const struct firmware *usbc_pd_fw;
4107 	struct amdgpu_bo *fw_buf_bo = NULL;
4108 	uint64_t fw_pri_mc_addr;
4109 	void *fw_pri_cpu_addr;
4110 	struct amdgpu_ip_block *ip_block;
4111 
4112 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4113 	if (!ip_block || !ip_block->status.late_initialized) {
4114 		dev_err(adev->dev, "PSP block is not ready yet.\n");
4115 		return -EBUSY;
4116 	}
4117 
4118 	if (!drm_dev_enter(ddev, &idx))
4119 		return -ENODEV;
4120 
4121 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
4122 				   "amdgpu/%s", buf);
4123 	if (ret)
4124 		goto fail;
4125 
4126 	/* LFB address must be aligned to a 1MB boundary, per PSP requirement */
4127 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
4128 				      AMDGPU_GEM_DOMAIN_VRAM |
4129 				      AMDGPU_GEM_DOMAIN_GTT,
4130 				      &fw_buf_bo, &fw_pri_mc_addr,
4131 				      &fw_pri_cpu_addr);
4132 	if (ret)
4133 		goto rel_buf;
4134 
4135 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
4136 
4137 	mutex_lock(&adev->psp.mutex);
4138 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
4139 	mutex_unlock(&adev->psp.mutex);
4140 
4141 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4142 
4143 rel_buf:
4144 	amdgpu_ucode_release(&usbc_pd_fw);
4145 fail:
4146 	if (ret) {
4147 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
4148 		count = ret;
4149 	}
4150 
4151 	drm_dev_exit(idx);
4152 	return count;
4153 }
4154 
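/*
 * Copy a firmware image into the PSP private (fw_pri) buffer, guarded by
 * drm_dev_enter() so nothing is written after the device has been unplugged.
 */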
4155 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
4156 {
4157 	int idx;
4158 
4159 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
4160 		return;
4161 
4162 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
4163 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
4164 
4165 	drm_dev_exit(idx);
4166 }
4167 
4168 /**
4169  * DOC: usbc_pd_fw
4170  * Reading from this file will retrieve the USB-C PD firmware version. Writing to
4171  * this file will trigger the update process.
4172  */
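/*
 * The attribute is typically exposed under the device's sysfs directory,
 * e.g. /sys/class/drm/cardN/device/usbc_pd_fw (the path is an example and
 * may vary by system). The value written is the name of a firmware file
 * that is resolved under the amdgpu/ firmware directory.
 */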
4173 static DEVICE_ATTR(usbc_pd_fw, 0644,
4174 		   psp_usbc_pd_fw_sysfs_read,
4175 		   psp_usbc_pd_fw_sysfs_write);
4176 
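/* A firmware section is considered present/valid if its size_bytes is non-zero. */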
4177 int is_psp_fw_valid(struct psp_bin_desc bin)
4178 {
4179 	return bin.size_bytes;
4180 }
4181 
4182 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4183 					const struct bin_attribute *bin_attr,
4184 					char *buffer, loff_t pos, size_t count)
4185 {
4186 	struct device *dev = kobj_to_dev(kobj);
4187 	struct drm_device *ddev = dev_get_drvdata(dev);
4188 	struct amdgpu_device *adev = drm_to_adev(ddev);
4189 
4190 	adev->psp.vbflash_done = false;
4191 
4192 	/* Safeguard against memory drain */
4193 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
4194 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4195 		kvfree(adev->psp.vbflash_tmp_buf);
4196 		adev->psp.vbflash_tmp_buf = NULL;
4197 		adev->psp.vbflash_image_size = 0;
4198 		return -ENOMEM;
4199 	}
4200 
4201 	/* TODO: just allocate the max for now and optimize to realloc later if needed */
4202 	if (!adev->psp.vbflash_tmp_buf) {
4203 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4204 		if (!adev->psp.vbflash_tmp_buf)
4205 			return -ENOMEM;
4206 	}
4207 
4208 	mutex_lock(&adev->psp.mutex);
4209 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4210 	adev->psp.vbflash_image_size += count;
4211 	mutex_unlock(&adev->psp.mutex);
4212 
4213 	dev_dbg(adev->dev, "IFWI staged for update\n");
4214 
4215 	return count;
4216 }
4217 
4218 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4219 				       const struct bin_attribute *bin_attr, char *buffer,
4220 				       loff_t pos, size_t count)
4221 {
4222 	struct device *dev = kobj_to_dev(kobj);
4223 	struct drm_device *ddev = dev_get_drvdata(dev);
4224 	struct amdgpu_device *adev = drm_to_adev(ddev);
4225 	struct amdgpu_bo *fw_buf_bo = NULL;
4226 	uint64_t fw_pri_mc_addr;
4227 	void *fw_pri_cpu_addr;
4228 	int ret;
4229 
4230 	if (adev->psp.vbflash_image_size == 0)
4231 		return -EINVAL;
4232 
4233 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4234 
4235 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4236 					AMDGPU_GPU_PAGE_SIZE,
4237 					AMDGPU_GEM_DOMAIN_VRAM,
4238 					&fw_buf_bo,
4239 					&fw_pri_mc_addr,
4240 					&fw_pri_cpu_addr);
4241 	if (ret)
4242 		goto rel_buf;
4243 
4244 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4245 
4246 	mutex_lock(&adev->psp.mutex);
4247 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4248 	mutex_unlock(&adev->psp.mutex);
4249 
4250 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4251 
4252 rel_buf:
4253 	kvfree(adev->psp.vbflash_tmp_buf);
4254 	adev->psp.vbflash_tmp_buf = NULL;
4255 	adev->psp.vbflash_image_size = 0;
4256 
4257 	if (ret) {
4258 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4259 		return ret;
4260 	}
4261 
4262 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4263 	return 0;
4264 }
4265 
4266 /**
4267  * DOC: psp_vbflash
4268  * Writing to this file will stage an IFWI for update. Reading from this file
4269  * will trigger the update process.
4270  */
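/*
 * Typical flow from userspace (paths are examples and may vary by system):
 *   cat <ifwi_image> > /sys/class/drm/cardN/device/psp_vbflash
 *   cat /sys/class/drm/cardN/device/psp_vbflash          # triggers the flash
 *   cat /sys/class/drm/cardN/device/psp_vbflash_status   # checks the result
 */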
4271 static const struct bin_attribute psp_vbflash_bin_attr = {
4272 	.attr = {.name = "psp_vbflash", .mode = 0660},
4273 	.size = 0,
4274 	.write_new = amdgpu_psp_vbflash_write,
4275 	.read_new = amdgpu_psp_vbflash_read,
4276 };
4277 
4278 /**
4279  * DOC: psp_vbflash_status
4280  * The status of the flash process.
4281  * 0: IFWI flash not complete.
4282  * 1: IFWI flash complete.
4283  */
4284 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4285 					 struct device_attribute *attr,
4286 					 char *buf)
4287 {
4288 	struct drm_device *ddev = dev_get_drvdata(dev);
4289 	struct amdgpu_device *adev = drm_to_adev(ddev);
4290 	uint32_t vbflash_status;
4291 
4292 	vbflash_status = psp_vbflash_status(&adev->psp);
4293 	if (!adev->psp.vbflash_done)
4294 		vbflash_status = 0;
4295 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4296 		vbflash_status = 1;
4297 
4298 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
4299 }
4300 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4301 
4302 static const struct bin_attribute *const bin_flash_attrs[] = {
4303 	&psp_vbflash_bin_attr,
4304 	NULL
4305 };
4306 
4307 static struct attribute *flash_attrs[] = {
4308 	&dev_attr_psp_vbflash_status.attr,
4309 	&dev_attr_usbc_pd_fw.attr,
4310 	NULL
4311 };
4312 
4313 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4314 {
4315 	struct device *dev = kobj_to_dev(kobj);
4316 	struct drm_device *ddev = dev_get_drvdata(dev);
4317 	struct amdgpu_device *adev = drm_to_adev(ddev);
4318 
4319 	if (attr == &dev_attr_usbc_pd_fw.attr)
4320 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
4321 
4322 	return adev->psp.sup_ifwi_up ? 0440 : 0;
4323 }
4324 
4325 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4326 						const struct bin_attribute *attr,
4327 						int idx)
4328 {
4329 	struct device *dev = kobj_to_dev(kobj);
4330 	struct drm_device *ddev = dev_get_drvdata(dev);
4331 	struct amdgpu_device *adev = drm_to_adev(ddev);
4332 
4333 	return adev->psp.sup_ifwi_up ? 0660 : 0;
4334 }
4335 
4336 const struct attribute_group amdgpu_flash_attr_group = {
4337 	.attrs = flash_attrs,
4338 	.bin_attrs_new = bin_flash_attrs,
4339 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4340 	.is_visible = amdgpu_flash_attr_is_visible,
4341 };
4342 
4343 #if defined(CONFIG_DEBUG_FS)
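/*
 * debugfs interface for dumping the SPIROM contents: open() allocates a
 * GTT buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 and asks the PSP to dump the
 * SPIROM into it, read() streams that buffer to userspace, and release()
 * frees it again. The file is typically exposed as
 * /sys/kernel/debug/dri/<minor>/psp_spirom_dump (path may vary by system).
 */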
4344 static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
4345 {
4346 	struct amdgpu_device *adev = filp->f_inode->i_private;
4347 	struct spirom_bo *bo_triplet;
4348 	int ret;
4349 
4350 	/* serialize open() calls on this file */
4351 	if (!mutex_trylock(&adev->psp.mutex))
4352 		return -EBUSY;
4353 
4354 	/*
4355 	 * Make sure only one userspace process is dumping at a time so that
4356 	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
4357 	 * e.g. when one process tries to open the file while another one has
4358 	 * already proceeded to read or release. This also eliminates the need
4359 	 * for a mutex in the read() and release() callbacks.
4360 	 */
4361 	if (adev->psp.spirom_dump_trip) {
4362 		mutex_unlock(&adev->psp.mutex);
4363 		return -EBUSY;
4364 	}
4365 
4366 	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
4367 	if (!bo_triplet) {
4368 		mutex_unlock(&adev->psp.mutex);
4369 		return -ENOMEM;
4370 	}
4371 
4372 	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
4373 				      AMDGPU_GPU_PAGE_SIZE,
4374 				      AMDGPU_GEM_DOMAIN_GTT,
4375 				      &bo_triplet->bo,
4376 				      &bo_triplet->mc_addr,
4377 				      &bo_triplet->cpu_addr);
4378 	if (ret)
4379 		goto rel_trip;
4380 
4381 	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
4382 	if (ret)
4383 		goto rel_bo;
4384 
4385 	adev->psp.spirom_dump_trip = bo_triplet;
4386 	mutex_unlock(&adev->psp.mutex);
4387 	return 0;
4388 rel_bo:
4389 	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4390 			      &bo_triplet->cpu_addr);
4391 rel_trip:
4392 	kfree(bo_triplet);
4393 	mutex_unlock(&adev->psp.mutex);
4394 	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
4395 	return ret;
4396 }
4397 
4398 static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
4399 					    loff_t *pos)
4400 {
4401 	struct amdgpu_device *adev = filp->f_inode->i_private;
4402 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4403 
4404 	if (!bo_triplet)
4405 		return -EINVAL;
4406 
4407 	return simple_read_from_buffer(buf,
4408 				       size,
4409 				       pos, bo_triplet->cpu_addr,
4410 				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4411 }
4412 
4413 static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
4414 {
4415 	struct amdgpu_device *adev = filp->f_inode->i_private;
4416 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4417 
4418 	if (bo_triplet) {
4419 		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4420 				      &bo_triplet->cpu_addr);
4421 		kfree(bo_triplet);
4422 	}
4423 
4424 	adev->psp.spirom_dump_trip = NULL;
4425 	return 0;
4426 }
4427 
4428 static const struct file_operations psp_dump_spirom_debugfs_ops = {
4429 	.owner = THIS_MODULE,
4430 	.open = psp_read_spirom_debugfs_open,
4431 	.read = psp_read_spirom_debugfs_read,
4432 	.release = psp_read_spirom_debugfs_release,
4433 	.llseek = default_llseek,
4434 };
4435 #endif
4436 
4437 void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
4438 {
4439 #if defined(CONFIG_DEBUG_FS)
4440 	struct drm_minor *minor = adev_to_drm(adev)->primary;
4441 
4442 	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
4443 				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4444 #endif
4445 }
4446 
4447 const struct amd_ip_funcs psp_ip_funcs = {
4448 	.name = "psp",
4449 	.early_init = psp_early_init,
4450 	.sw_init = psp_sw_init,
4451 	.sw_fini = psp_sw_fini,
4452 	.hw_init = psp_hw_init,
4453 	.hw_fini = psp_hw_fini,
4454 	.suspend = psp_suspend,
4455 	.resume = psp_resume,
4456 	.set_clockgating_state = psp_set_clockgating_state,
4457 	.set_powergating_state = psp_set_powergating_state,
4458 };
4459 
4460 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4461 	.type = AMD_IP_BLOCK_TYPE_PSP,
4462 	.major = 3,
4463 	.minor = 1,
4464 	.rev = 0,
4465 	.funcs = &psp_ip_funcs,
4466 };
4467 
4468 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4469 	.type = AMD_IP_BLOCK_TYPE_PSP,
4470 	.major = 10,
4471 	.minor = 0,
4472 	.rev = 0,
4473 	.funcs = &psp_ip_funcs,
4474 };
4475 
4476 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4477 	.type = AMD_IP_BLOCK_TYPE_PSP,
4478 	.major = 11,
4479 	.minor = 0,
4480 	.rev = 0,
4481 	.funcs = &psp_ip_funcs,
4482 };
4483 
4484 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4485 	.type = AMD_IP_BLOCK_TYPE_PSP,
4486 	.major = 11,
4487 	.minor = 0,
4488 	.rev = 8,
4489 	.funcs = &psp_ip_funcs,
4490 };
4491 
4492 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4493 	.type = AMD_IP_BLOCK_TYPE_PSP,
4494 	.major = 12,
4495 	.minor = 0,
4496 	.rev = 0,
4497 	.funcs = &psp_ip_funcs,
4498 };
4499 
4500 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4501 	.type = AMD_IP_BLOCK_TYPE_PSP,
4502 	.major = 13,
4503 	.minor = 0,
4504 	.rev = 0,
4505 	.funcs = &psp_ip_funcs,
4506 };
4507 
4508 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4509 	.type = AMD_IP_BLOCK_TYPE_PSP,
4510 	.major = 13,
4511 	.minor = 0,
4512 	.rev = 4,
4513 	.funcs = &psp_ip_funcs,
4514 };
4515 
4516 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4517 	.type = AMD_IP_BLOCK_TYPE_PSP,
4518 	.major = 14,
4519 	.minor = 0,
4520 	.rev = 0,
4521 	.funcs = &psp_ip_funcs,
4522 };
4523