xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision 75372d75a4e23783583998ed99d5009d555850da)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 #include "psp_v15_0.h"
43 #include "psp_v15_0_8.h"
44 
45 #include "amdgpu_ras.h"
46 #include "amdgpu_securedisplay.h"
47 #include "amdgpu_atomfirmware.h"
48 
49 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)
50 
51 static int psp_load_smu_fw(struct psp_context *psp);
52 static int psp_rap_terminate(struct psp_context *psp);
53 static int psp_securedisplay_terminate(struct psp_context *psp);
54 
55 static int psp_ring_init(struct psp_context *psp,
56 			 enum psp_ring_type ring_type)
57 {
58 	int ret = 0;
59 	struct psp_ring *ring;
60 	struct amdgpu_device *adev = psp->adev;
61 
62 	ring = &psp->km_ring;
63 
64 	ring->ring_type = ring_type;
65 
66 	/* allocate 4k Page of Local Frame Buffer memory for ring */
67 	ring->ring_size = 0x1000;
68 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
69 				      AMDGPU_GEM_DOMAIN_VRAM |
70 				      AMDGPU_GEM_DOMAIN_GTT,
71 				      &adev->firmware.rbuf,
72 				      &ring->ring_mem_mc_addr,
73 				      (void **)&ring->ring_mem);
74 	if (ret) {
75 		ring->ring_size = 0;
76 		return ret;
77 	}
78 
79 	return 0;
80 }
81 
82 /*
83  * Due to DF Cstate management centralized to PMFW, the firmware
84  * loading sequence will be updated as below:
85  *   - Load KDB
86  *   - Load SYS_DRV
87  *   - Load tOS
88  *   - Load PMFW
89  *   - Setup TMR
90  *   - Load other non-psp fw
91  *   - Load ASD
92  *   - Load XGMI/RAS/HDCP/DTM TA if any
93  *
94  * This new sequence is required for
95  *   - Arcturus and onwards
96  */
97 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
98 {
99 	struct amdgpu_device *adev = psp->adev;
100 
101 	if (amdgpu_sriov_vf(adev)) {
102 		psp->pmfw_centralized_cstate_management = false;
103 		return;
104 	}
105 
106 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
107 	case IP_VERSION(11, 0, 0):
108 	case IP_VERSION(11, 0, 4):
109 	case IP_VERSION(11, 0, 5):
110 	case IP_VERSION(11, 0, 7):
111 	case IP_VERSION(11, 0, 9):
112 	case IP_VERSION(11, 0, 11):
113 	case IP_VERSION(11, 0, 12):
114 	case IP_VERSION(11, 0, 13):
115 	case IP_VERSION(13, 0, 0):
116 	case IP_VERSION(13, 0, 2):
117 	case IP_VERSION(13, 0, 7):
118 		psp->pmfw_centralized_cstate_management = true;
119 		break;
120 	default:
121 		psp->pmfw_centralized_cstate_management = false;
122 		break;
123 	}
124 }
125 
126 static int psp_init_sriov_microcode(struct psp_context *psp)
127 {
128 	struct amdgpu_device *adev = psp->adev;
129 	char ucode_prefix[30];
130 	int ret = 0;
131 
132 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
133 
134 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
135 	case IP_VERSION(9, 0, 0):
136 	case IP_VERSION(11, 0, 7):
137 	case IP_VERSION(11, 0, 9):
138 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
139 		ret = psp_init_cap_microcode(psp, ucode_prefix);
140 		break;
141 	case IP_VERSION(13, 0, 2):
142 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
143 		ret = psp_init_cap_microcode(psp, ucode_prefix);
144 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
145 		break;
146 	case IP_VERSION(13, 0, 0):
147 		adev->virt.autoload_ucode_id = 0;
148 		break;
149 	case IP_VERSION(13, 0, 6):
150 	case IP_VERSION(13, 0, 14):
151 		ret = psp_init_cap_microcode(psp, ucode_prefix);
152 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
153 		break;
154 	case IP_VERSION(13, 0, 10):
155 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
156 		ret = psp_init_cap_microcode(psp, ucode_prefix);
157 		break;
158 	case IP_VERSION(13, 0, 12):
159 		ret = psp_init_ta_microcode(psp, ucode_prefix);
160 		break;
161 	default:
162 		return -EINVAL;
163 	}
164 	return ret;
165 }
166 
167 static int psp_early_init(struct amdgpu_ip_block *ip_block)
168 {
169 	struct amdgpu_device *adev = ip_block->adev;
170 	struct psp_context *psp = &adev->psp;
171 
172 	psp->autoload_supported = true;
173 	psp->boot_time_tmr = true;
174 
175 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
176 	case IP_VERSION(9, 0, 0):
177 		psp_v3_1_set_psp_funcs(psp);
178 		psp->autoload_supported = false;
179 		psp->boot_time_tmr = false;
180 		break;
181 	case IP_VERSION(10, 0, 0):
182 	case IP_VERSION(10, 0, 1):
183 		psp_v10_0_set_psp_funcs(psp);
184 		psp->autoload_supported = false;
185 		psp->boot_time_tmr = false;
186 		break;
187 	case IP_VERSION(11, 0, 2):
188 	case IP_VERSION(11, 0, 4):
189 		psp_v11_0_set_psp_funcs(psp);
190 		psp->autoload_supported = false;
191 		psp->boot_time_tmr = false;
192 		break;
193 	case IP_VERSION(11, 0, 0):
194 	case IP_VERSION(11, 0, 7):
195 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
196 		fallthrough;
197 	case IP_VERSION(11, 0, 5):
198 	case IP_VERSION(11, 0, 9):
199 	case IP_VERSION(11, 0, 11):
200 	case IP_VERSION(11, 5, 0):
201 	case IP_VERSION(11, 5, 2):
202 	case IP_VERSION(11, 0, 12):
203 	case IP_VERSION(11, 0, 13):
204 		psp_v11_0_set_psp_funcs(psp);
205 		psp->boot_time_tmr = false;
206 		break;
207 	case IP_VERSION(11, 0, 3):
208 	case IP_VERSION(12, 0, 1):
209 		psp_v12_0_set_psp_funcs(psp);
210 		psp->autoload_supported = false;
211 		psp->boot_time_tmr = false;
212 		break;
213 	case IP_VERSION(13, 0, 2):
214 		psp->boot_time_tmr = false;
215 		fallthrough;
216 	case IP_VERSION(13, 0, 6):
217 	case IP_VERSION(13, 0, 14):
218 		psp_v13_0_set_psp_funcs(psp);
219 		psp->autoload_supported = false;
220 		break;
221 	case IP_VERSION(13, 0, 12):
222 		psp_v13_0_set_psp_funcs(psp);
223 		psp->autoload_supported = false;
224 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
225 		break;
226 	case IP_VERSION(13, 0, 1):
227 	case IP_VERSION(13, 0, 3):
228 	case IP_VERSION(13, 0, 5):
229 	case IP_VERSION(13, 0, 8):
230 	case IP_VERSION(13, 0, 11):
231 	case IP_VERSION(14, 0, 0):
232 	case IP_VERSION(14, 0, 1):
233 	case IP_VERSION(14, 0, 4):
234 		psp_v13_0_set_psp_funcs(psp);
235 		psp->boot_time_tmr = false;
236 		break;
237 	case IP_VERSION(11, 0, 8):
238 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
239 			psp_v11_0_8_set_psp_funcs(psp);
240 		}
241 		psp->autoload_supported = false;
242 		psp->boot_time_tmr = false;
243 		break;
244 	case IP_VERSION(13, 0, 0):
245 	case IP_VERSION(13, 0, 7):
246 	case IP_VERSION(13, 0, 10):
247 		psp_v13_0_set_psp_funcs(psp);
248 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
249 		psp->boot_time_tmr = false;
250 		break;
251 	case IP_VERSION(13, 0, 4):
252 		psp_v13_0_4_set_psp_funcs(psp);
253 		psp->boot_time_tmr = false;
254 		break;
255 	case IP_VERSION(14, 0, 2):
256 	case IP_VERSION(14, 0, 3):
257 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
258 		psp_v14_0_set_psp_funcs(psp);
259 		break;
260 	case IP_VERSION(14, 0, 5):
261 		psp_v14_0_set_psp_funcs(psp);
262 		psp->boot_time_tmr = false;
263 		break;
264 	case IP_VERSION(15, 0, 0):
265 		psp_v15_0_0_set_psp_funcs(psp);
266 		psp->boot_time_tmr = false;
267 		break;
268 	case IP_VERSION(15, 0, 8):
269 		psp_v15_0_8_set_psp_funcs(psp);
270 		break;
271 	default:
272 		return -EINVAL;
273 	}
274 
275 	psp->adev = adev;
276 
277 	adev->psp_timeout = 20000;
278 
279 	psp_check_pmfw_centralized_cstate_management(psp);
280 
281 	if (amdgpu_sriov_vf(adev))
282 		return psp_init_sriov_microcode(psp);
283 	else
284 		return psp_init_microcode(psp);
285 }
286 
287 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
288 {
289 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
290 			      &mem_ctx->shared_buf);
291 	mem_ctx->shared_bo = NULL;
292 }
293 
294 static void psp_free_shared_bufs(struct psp_context *psp)
295 {
296 	void *tmr_buf;
297 	void **pptr;
298 
299 	/* free TMR memory buffer */
300 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
301 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
302 	psp->tmr_bo = NULL;
303 
304 	/* free xgmi shared memory */
305 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
306 
307 	/* free ras shared memory */
308 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
309 
310 	/* free hdcp shared memory */
311 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
312 
313 	/* free dtm shared memory */
314 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
315 
316 	/* free rap shared memory */
317 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
318 
319 	/* free securedisplay shared memory */
320 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
321 
322 
323 }
324 
325 static void psp_memory_training_fini(struct psp_context *psp)
326 {
327 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
328 
329 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
330 	kfree(ctx->sys_cache);
331 	ctx->sys_cache = NULL;
332 }
333 
334 static int psp_memory_training_init(struct psp_context *psp)
335 {
336 	int ret;
337 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
338 
339 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
340 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
341 		return 0;
342 	}
343 
344 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
345 	if (ctx->sys_cache == NULL) {
346 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
347 		ret = -ENOMEM;
348 		goto Err_out;
349 	}
350 
351 	dev_dbg(psp->adev->dev,
352 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
353 		ctx->train_data_size,
354 		ctx->p2c_train_data_offset,
355 		ctx->c2p_train_data_offset);
356 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
357 	return 0;
358 
359 Err_out:
360 	psp_memory_training_fini(psp);
361 	return ret;
362 }
363 
364 /*
365  * Helper funciton to query psp runtime database entry
366  *
367  * @adev: amdgpu_device pointer
368  * @entry_type: the type of psp runtime database entry
369  * @db_entry: runtime database entry pointer
370  *
371  * Return false if runtime database doesn't exit or entry is invalid
372  * or true if the specific database entry is found, and copy to @db_entry
373  */
374 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
375 				     enum psp_runtime_entry_type entry_type,
376 				     void *db_entry)
377 {
378 	uint64_t db_header_pos, db_dir_pos;
379 	struct psp_runtime_data_header db_header = {0};
380 	struct psp_runtime_data_directory db_dir = {0};
381 	bool ret = false;
382 	int i;
383 
384 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
385 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
386 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
387 		return false;
388 
389 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
390 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
391 
392 	/* read runtime db header from vram */
393 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
394 			sizeof(struct psp_runtime_data_header), false);
395 
396 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
397 		/* runtime db doesn't exist, exit */
398 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
399 		return false;
400 	}
401 
402 	/* read runtime database entry from vram */
403 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
404 			sizeof(struct psp_runtime_data_directory), false);
405 
406 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
407 		/* invalid db entry count, exit */
408 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
409 		return false;
410 	}
411 
412 	/* look up for requested entry type */
413 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
414 		if (db_dir.entry_list[i].entry_type == entry_type) {
415 			switch (entry_type) {
416 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
417 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
418 					/* invalid db entry size */
419 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
420 					return false;
421 				}
422 				/* read runtime database entry */
423 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
424 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
425 				ret = true;
426 				break;
427 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
428 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
429 					/* invalid db entry size */
430 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
431 					return false;
432 				}
433 				/* read runtime database entry */
434 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
435 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
436 				ret = true;
437 				break;
438 			default:
439 				ret = false;
440 				break;
441 			}
442 		}
443 	}
444 
445 	return ret;
446 }
447 
448 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
449 {
450 	struct amdgpu_device *adev = ip_block->adev;
451 	struct psp_context *psp = &adev->psp;
452 	int ret;
453 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
454 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
455 	struct psp_runtime_scpm_entry scpm_entry;
456 
457 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
458 	if (!psp->cmd) {
459 		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
460 		return -ENOMEM;
461 	}
462 
463 	adev->psp.xgmi_context.supports_extended_data =
464 		!adev->gmc.xgmi.connected_to_cpu &&
465 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
466 
467 	memset(&scpm_entry, 0, sizeof(scpm_entry));
468 	if ((psp_get_runtime_db_entry(adev,
469 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
470 				&scpm_entry)) &&
471 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
472 		adev->scpm_enabled = true;
473 		adev->scpm_status = scpm_entry.scpm_status;
474 	} else {
475 		adev->scpm_enabled = false;
476 		adev->scpm_status = SCPM_DISABLE;
477 	}
478 
479 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
480 
481 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
482 	if (psp_get_runtime_db_entry(adev,
483 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
484 				&boot_cfg_entry)) {
485 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
486 		if ((psp->boot_cfg_bitmask) &
487 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
488 			/* If psp runtime database exists, then
489 			 * only enable two stage memory training
490 			 * when TWO_STAGE_DRAM_TRAINING bit is set
491 			 * in runtime database
492 			 */
493 			mem_training_ctx->enable_mem_training = true;
494 		}
495 
496 	} else {
497 		/* If psp runtime database doesn't exist or is
498 		 * invalid, force enable two stage memory training
499 		 */
500 		mem_training_ctx->enable_mem_training = true;
501 	}
502 
503 	if (mem_training_ctx->enable_mem_training) {
504 		ret = psp_memory_training_init(psp);
505 		if (ret) {
506 			dev_err(adev->dev, "Failed to initialize memory training!\n");
507 			return ret;
508 		}
509 
510 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
511 		if (ret) {
512 			dev_err(adev->dev, "Failed to process memory training!\n");
513 			return ret;
514 		}
515 	}
516 
517 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
518 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
519 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
520 				      &psp->fw_pri_bo,
521 				      &psp->fw_pri_mc_addr,
522 				      &psp->fw_pri_buf);
523 	if (ret)
524 		return ret;
525 
526 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
527 				      AMDGPU_GEM_DOMAIN_VRAM |
528 				      AMDGPU_GEM_DOMAIN_GTT,
529 				      &psp->fence_buf_bo,
530 				      &psp->fence_buf_mc_addr,
531 				      &psp->fence_buf);
532 	if (ret)
533 		goto failed1;
534 
535 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
536 				      AMDGPU_GEM_DOMAIN_VRAM |
537 				      AMDGPU_GEM_DOMAIN_GTT,
538 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
539 				      (void **)&psp->cmd_buf_mem);
540 	if (ret)
541 		goto failed2;
542 
543 	return 0;
544 
545 failed2:
546 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
547 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
548 failed1:
549 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
550 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
551 	return ret;
552 }
553 
554 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
555 {
556 	struct amdgpu_device *adev = ip_block->adev;
557 	struct psp_context *psp = &adev->psp;
558 
559 	psp_memory_training_fini(psp);
560 
561 	amdgpu_ucode_release(&psp->sos_fw);
562 	amdgpu_ucode_release(&psp->asd_fw);
563 	amdgpu_ucode_release(&psp->ta_fw);
564 	amdgpu_ucode_release(&psp->cap_fw);
565 	amdgpu_ucode_release(&psp->toc_fw);
566 
567 	kfree(psp->cmd);
568 	psp->cmd = NULL;
569 
570 	psp_free_shared_bufs(psp);
571 
572 	if (psp->km_ring.ring_mem)
573 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
574 				      &psp->km_ring.ring_mem_mc_addr,
575 				      (void **)&psp->km_ring.ring_mem);
576 
577 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
578 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
579 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
580 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
581 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
582 			      (void **)&psp->cmd_buf_mem);
583 
584 	return 0;
585 }
586 
587 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
588 		 uint32_t mask, uint32_t flags)
589 {
590 	bool check_changed = flags & PSP_WAITREG_CHANGED;
591 	bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
592 	uint32_t val;
593 	int i;
594 	struct amdgpu_device *adev = psp->adev;
595 
596 	if (psp->adev->no_hw_access)
597 		return 0;
598 
599 	for (i = 0; i < adev->usec_timeout; i++) {
600 		val = RREG32(reg_index);
601 		if (check_changed) {
602 			if (val != reg_val)
603 				return 0;
604 		} else {
605 			if ((val & mask) == reg_val)
606 				return 0;
607 		}
608 		udelay(1);
609 	}
610 
611 	if (verbose)
612 		dev_err(adev->dev,
613 			"psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
614 			reg_index, mask, val, reg_val);
615 
616 	return -ETIME;
617 }
618 
619 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
620 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
621 {
622 	uint32_t val;
623 	int i;
624 	struct amdgpu_device *adev = psp->adev;
625 
626 	if (psp->adev->no_hw_access)
627 		return 0;
628 
629 	for (i = 0; i < msec_timeout; i++) {
630 		val = RREG32(reg_index);
631 		if ((val & mask) == reg_val)
632 			return 0;
633 		msleep(1);
634 	}
635 
636 	return -ETIME;
637 }
638 
639 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
640 {
641 	switch (cmd_id) {
642 	case GFX_CMD_ID_LOAD_TA:
643 		return "LOAD_TA";
644 	case GFX_CMD_ID_UNLOAD_TA:
645 		return "UNLOAD_TA";
646 	case GFX_CMD_ID_INVOKE_CMD:
647 		return "INVOKE_CMD";
648 	case GFX_CMD_ID_LOAD_ASD:
649 		return "LOAD_ASD";
650 	case GFX_CMD_ID_SETUP_TMR:
651 		return "SETUP_TMR";
652 	case GFX_CMD_ID_LOAD_IP_FW:
653 		return "LOAD_IP_FW";
654 	case GFX_CMD_ID_DESTROY_TMR:
655 		return "DESTROY_TMR";
656 	case GFX_CMD_ID_SAVE_RESTORE:
657 		return "SAVE_RESTORE_IP_FW";
658 	case GFX_CMD_ID_SETUP_VMR:
659 		return "SETUP_VMR";
660 	case GFX_CMD_ID_DESTROY_VMR:
661 		return "DESTROY_VMR";
662 	case GFX_CMD_ID_PROG_REG:
663 		return "PROG_REG";
664 	case GFX_CMD_ID_GET_FW_ATTESTATION:
665 		return "GET_FW_ATTESTATION";
666 	case GFX_CMD_ID_LOAD_TOC:
667 		return "ID_LOAD_TOC";
668 	case GFX_CMD_ID_AUTOLOAD_RLC:
669 		return "AUTOLOAD_RLC";
670 	case GFX_CMD_ID_BOOT_CFG:
671 		return "BOOT_CFG";
672 	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
673 		return "CONFIG_SQ_PERFMON";
674 	case GFX_CMD_ID_FB_FW_RESERV_ADDR:
675 		return "FB_FW_RESERV_ADDR";
676 	case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
677 		return "FB_FW_RESERV_EXT_ADDR";
678 	case GFX_CMD_ID_SRIOV_SPATIAL_PART:
679 		return "SPATIAL_PARTITION";
680 	case GFX_CMD_ID_FB_NPS_MODE:
681 		return "NPS_MODE_CHANGE";
682 	default:
683 		return "UNKNOWN CMD";
684 	}
685 }
686 
687 static bool psp_err_warn(struct psp_context *psp)
688 {
689 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
690 
691 	/* This response indicates reg list is already loaded */
692 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
693 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
694 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
695 	    cmd->resp.status == TEE_ERROR_CANCEL)
696 		return false;
697 
698 	return true;
699 }
700 
701 static int
702 psp_cmd_submit_buf(struct psp_context *psp,
703 		   struct amdgpu_firmware_info *ucode,
704 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
705 {
706 	int ret;
707 	int index;
708 	int timeout = psp->adev->psp_timeout;
709 	bool ras_intr = false;
710 	bool skip_unsupport = false;
711 
712 	if (psp->adev->no_hw_access)
713 		return 0;
714 
715 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
716 
717 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
718 
719 	index = atomic_inc_return(&psp->fence_value);
720 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
721 	if (ret) {
722 		atomic_dec(&psp->fence_value);
723 		goto exit;
724 	}
725 
726 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
727 	while (*((unsigned int *)psp->fence_buf) != index) {
728 		if (--timeout == 0)
729 			break;
730 		/*
731 		 * Shouldn't wait for timeout when err_event_athub occurs,
732 		 * because gpu reset thread triggered and lock resource should
733 		 * be released for psp resume sequence.
734 		 */
735 		ras_intr = amdgpu_ras_intr_triggered();
736 		if (ras_intr)
737 			break;
738 		usleep_range(10, 100);
739 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
740 	}
741 
742 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
743 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
744 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
745 
746 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
747 
748 	/* In some cases, psp response status is not 0 even there is no
749 	 * problem while the command is submitted. Some version of PSP FW
750 	 * doesn't write 0 to that field.
751 	 * So here we would like to only print a warning instead of an error
752 	 * during psp initialization to avoid breaking hw_init and it doesn't
753 	 * return -EINVAL.
754 	 */
755 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
756 		if (ucode)
757 			dev_warn(psp->adev->dev,
758 				 "failed to load ucode %s(0x%X) ",
759 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
760 		if (psp_err_warn(psp))
761 			dev_warn(
762 				psp->adev->dev,
763 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
764 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
765 				psp->cmd_buf_mem->cmd_id,
766 				psp->cmd_buf_mem->resp.status);
767 		/* If any firmware (including CAP) load fails under SRIOV, it should
768 		 * return failure to stop the VF from initializing.
769 		 * Also return failure in case of timeout
770 		 */
771 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
772 			ret = -EINVAL;
773 			goto exit;
774 		}
775 	}
776 
777 	if (ucode) {
778 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
779 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
780 	}
781 
782 exit:
783 	return ret;
784 }
785 
786 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
787 {
788 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
789 
790 	mutex_lock(&psp->mutex);
791 
792 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
793 
794 	return cmd;
795 }
796 
797 static void release_psp_cmd_buf(struct psp_context *psp)
798 {
799 	mutex_unlock(&psp->mutex);
800 }
801 
802 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
803 				 struct psp_gfx_cmd_resp *cmd,
804 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
805 {
806 	struct amdgpu_device *adev = psp->adev;
807 	uint32_t size = 0;
808 	uint64_t tmr_pa = 0;
809 
810 	if (tmr_bo) {
811 		size = amdgpu_bo_size(tmr_bo);
812 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
813 	}
814 
815 	if (amdgpu_sriov_vf(psp->adev))
816 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
817 	else
818 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
819 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
820 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
821 	cmd->cmd.cmd_setup_tmr.buf_size = size;
822 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
823 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
824 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
825 }
826 
827 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
828 				      uint64_t pri_buf_mc, uint32_t size)
829 {
830 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
831 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
832 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
833 	cmd->cmd.cmd_load_toc.toc_size = size;
834 }
835 
836 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */
837 static int psp_load_toc(struct psp_context *psp,
838 			uint32_t *tmr_size)
839 {
840 	int ret;
841 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
842 
843 	/* Copy toc to psp firmware private buffer */
844 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
845 
846 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
847 
848 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
849 				 psp->fence_buf_mc_addr);
850 	if (!ret)
851 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
852 
853 	release_psp_cmd_buf(psp);
854 
855 	return ret;
856 }
857 
858 /* Set up Trusted Memory Region */
859 static int psp_tmr_init(struct psp_context *psp)
860 {
861 	int ret = 0;
862 	int tmr_size;
863 	void *tmr_buf;
864 	void **pptr;
865 
866 	/*
867 	 * According to HW engineer, they prefer the TMR address be "naturally
868 	 * aligned" , e.g. the start address be an integer divide of TMR size.
869 	 *
870 	 * Note: this memory need be reserved till the driver
871 	 * uninitializes.
872 	 */
873 	tmr_size = PSP_TMR_SIZE(psp->adev);
874 
875 	/* For ASICs support RLC autoload, psp will parse the toc
876 	 * and calculate the total size of TMR needed
877 	 */
878 	if (!amdgpu_sriov_vf(psp->adev) &&
879 	    psp->toc.start_addr &&
880 	    psp->toc.size_bytes &&
881 	    psp->fw_pri_buf) {
882 		ret = psp_load_toc(psp, &tmr_size);
883 		if (ret) {
884 			dev_err(psp->adev->dev, "Failed to load toc\n");
885 			return ret;
886 		}
887 	}
888 
889 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
890 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
891 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
892 					      PSP_TMR_ALIGNMENT,
893 					      AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
894 					      &psp->tmr_bo, &psp->tmr_mc_addr,
895 					      pptr);
896 	}
897 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
898 		psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
899 
900 	return ret;
901 }
902 
903 static bool psp_skip_tmr(struct psp_context *psp)
904 {
905 	u32 ip_version = amdgpu_ip_version(psp->adev, MP0_HWIP, 0);
906 
907 	if (amdgpu_sriov_vf(psp->adev))
908 		return (ip_version >= IP_VERSION(11, 0, 7)) ? true : false;
909 	else
910 		return (!psp->boot_time_tmr || !psp->autoload_supported) ? false : true;
911 }
912 
913 static int psp_tmr_load(struct psp_context *psp)
914 {
915 	int ret;
916 	struct psp_gfx_cmd_resp *cmd;
917 
918 	if (psp_skip_tmr(psp))
919 		return 0;
920 
921 	cmd = acquire_psp_cmd_buf(psp);
922 
923 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
924 	if (psp->tmr_bo)
925 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
926 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
927 
928 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
929 				 psp->fence_buf_mc_addr);
930 
931 	release_psp_cmd_buf(psp);
932 
933 	return ret;
934 }
935 
936 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
937 					struct psp_gfx_cmd_resp *cmd)
938 {
939 	if (amdgpu_sriov_vf(psp->adev))
940 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
941 	else
942 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
943 }
944 
945 static int psp_tmr_unload(struct psp_context *psp)
946 {
947 	int ret;
948 	struct psp_gfx_cmd_resp *cmd;
949 
950 	if (psp_skip_tmr(psp))
951 		return 0;
952 
953 	cmd = acquire_psp_cmd_buf(psp);
954 
955 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
956 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
957 
958 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
959 				 psp->fence_buf_mc_addr);
960 
961 	release_psp_cmd_buf(psp);
962 
963 	return ret;
964 }
965 
966 static int psp_tmr_terminate(struct psp_context *psp)
967 {
968 	return psp_tmr_unload(psp);
969 }
970 
971 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
972 					uint64_t *output_ptr)
973 {
974 	int ret;
975 	struct psp_gfx_cmd_resp *cmd;
976 
977 	if (!output_ptr)
978 		return -EINVAL;
979 
980 	if (amdgpu_sriov_vf(psp->adev))
981 		return 0;
982 
983 	cmd = acquire_psp_cmd_buf(psp);
984 
985 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
986 
987 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
988 				 psp->fence_buf_mc_addr);
989 
990 	if (!ret) {
991 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
992 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
993 	}
994 
995 	release_psp_cmd_buf(psp);
996 
997 	return ret;
998 }
999 
1000 static int psp_get_fw_reservation_info(struct psp_context *psp,
1001 						   uint32_t cmd_id,
1002 						   uint64_t *addr,
1003 						   uint32_t *size)
1004 {
1005 	int ret;
1006 	uint32_t status;
1007 	struct psp_gfx_cmd_resp *cmd;
1008 
1009 	cmd = acquire_psp_cmd_buf(psp);
1010 
1011 	cmd->cmd_id = cmd_id;
1012 
1013 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1014 				 psp->fence_buf_mc_addr);
1015 	if (ret) {
1016 		release_psp_cmd_buf(psp);
1017 		return ret;
1018 	}
1019 
1020 	status = cmd->resp.status;
1021 	if (status == PSP_ERR_UNKNOWN_COMMAND) {
1022 		release_psp_cmd_buf(psp);
1023 		*addr = 0;
1024 		*size = 0;
1025 		return 0;
1026 	}
1027 
1028 	*addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
1029 		cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
1030 	*size = cmd->resp.uresp.fw_reserve_info.reserve_size;
1031 
1032 	release_psp_cmd_buf(psp);
1033 
1034 	return 0;
1035 }
1036 
1037 int psp_update_fw_reservation(struct psp_context *psp)
1038 {
1039 	int ret;
1040 	uint64_t reserv_addr, reserv_addr_ext;
1041 	uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
1042 	struct amdgpu_device *adev = psp->adev;
1043 
1044 	mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
1045 
1046 	if (amdgpu_sriov_vf(psp->adev))
1047 		return 0;
1048 
1049 	switch (mp0_ip_ver) {
1050 	case IP_VERSION(14, 0, 2):
1051 		if (adev->psp.sos.fw_version < 0x3b0e0d)
1052 			return 0;
1053 		break;
1054 
1055 	case IP_VERSION(14, 0, 3):
1056 		if (adev->psp.sos.fw_version < 0x3a0e14)
1057 			return 0;
1058 		break;
1059 
1060 	default:
1061 		return 0;
1062 	}
1063 
1064 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
1065 	if (ret)
1066 		return ret;
1067 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
1068 	if (ret)
1069 		return ret;
1070 
1071 	if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
1072 		dev_warn(adev->dev, "reserve fw region is not valid!\n");
1073 		return 0;
1074 	}
1075 
1076 	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1077 
1078 	reserv_size = roundup(reserv_size, SZ_1M);
1079 
1080 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
1081 	if (ret) {
1082 		dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
1083 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1084 		return ret;
1085 	}
1086 
1087 	reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
1088 
1089 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
1090 					 &adev->mman.fw_reserved_memory_extend, NULL);
1091 	if (ret) {
1092 		dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
1093 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
1094 		return ret;
1095 	}
1096 
1097 	return 0;
1098 }
1099 
1100 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
1101 {
1102 	struct psp_context *psp = &adev->psp;
1103 	struct psp_gfx_cmd_resp *cmd;
1104 	int ret;
1105 
1106 	if (amdgpu_sriov_vf(adev))
1107 		return 0;
1108 
1109 	cmd = acquire_psp_cmd_buf(psp);
1110 
1111 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1112 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
1113 
1114 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1115 	if (!ret) {
1116 		*boot_cfg =
1117 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1118 	}
1119 
1120 	release_psp_cmd_buf(psp);
1121 
1122 	return ret;
1123 }
1124 
1125 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1126 {
1127 	int ret;
1128 	struct psp_context *psp = &adev->psp;
1129 	struct psp_gfx_cmd_resp *cmd;
1130 
1131 	if (amdgpu_sriov_vf(adev))
1132 		return 0;
1133 
1134 	cmd = acquire_psp_cmd_buf(psp);
1135 
1136 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1137 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1138 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1139 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1140 
1141 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1142 
1143 	release_psp_cmd_buf(psp);
1144 
1145 	return ret;
1146 }
1147 
1148 static int psp_rl_load(struct amdgpu_device *adev)
1149 {
1150 	int ret;
1151 	struct psp_context *psp = &adev->psp;
1152 	struct psp_gfx_cmd_resp *cmd;
1153 
1154 	if (!is_psp_fw_valid(psp->rl))
1155 		return 0;
1156 
1157 	cmd = acquire_psp_cmd_buf(psp);
1158 
1159 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1160 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1161 
1162 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1163 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1164 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1165 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1166 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1167 
1168 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1169 
1170 	release_psp_cmd_buf(psp);
1171 
1172 	return ret;
1173 }
1174 
1175 int psp_memory_partition(struct psp_context *psp, int mode)
1176 {
1177 	struct psp_gfx_cmd_resp *cmd;
1178 	int ret;
1179 
1180 	if (amdgpu_sriov_vf(psp->adev))
1181 		return 0;
1182 
1183 	cmd = acquire_psp_cmd_buf(psp);
1184 
1185 	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1186 	cmd->cmd.cmd_memory_part.mode = mode;
1187 
1188 	dev_info(psp->adev->dev,
1189 		 "Requesting %d memory partition change through PSP", mode);
1190 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1191 	if (ret)
1192 		dev_err(psp->adev->dev,
1193 			"PSP request failed to change to NPS%d mode\n", mode);
1194 
1195 	release_psp_cmd_buf(psp);
1196 
1197 	return ret;
1198 }
1199 
1200 int psp_spatial_partition(struct psp_context *psp, int mode)
1201 {
1202 	struct psp_gfx_cmd_resp *cmd;
1203 	int ret;
1204 
1205 	if (amdgpu_sriov_vf(psp->adev))
1206 		return 0;
1207 
1208 	cmd = acquire_psp_cmd_buf(psp);
1209 
1210 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1211 	cmd->cmd.cmd_spatial_part.mode = mode;
1212 
1213 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1214 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1215 
1216 	release_psp_cmd_buf(psp);
1217 
1218 	return ret;
1219 }
1220 
1221 static int psp_asd_initialize(struct psp_context *psp)
1222 {
1223 	int ret;
1224 
1225 	/* If PSP version doesn't match ASD version, asd loading will be failed.
1226 	 * add workaround to bypass it for sriov now.
1227 	 * TODO: add version check to make it common
1228 	 */
1229 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1230 		return 0;
1231 
1232 	/* bypass asd if display hardware is not available */
1233 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1234 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1235 		return 0;
1236 
1237 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1238 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1239 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1240 
1241 	ret = psp_ta_load(psp, &psp->asd_context);
1242 	if (!ret)
1243 		psp->asd_context.initialized = true;
1244 
1245 	return ret;
1246 }
1247 
1248 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1249 				       uint32_t session_id)
1250 {
1251 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1252 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1253 }
1254 
1255 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1256 {
1257 	int ret;
1258 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1259 
1260 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1261 
1262 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1263 
1264 	context->resp_status = cmd->resp.status;
1265 
1266 	release_psp_cmd_buf(psp);
1267 
1268 	return ret;
1269 }
1270 
1271 static int psp_asd_terminate(struct psp_context *psp)
1272 {
1273 	int ret;
1274 
1275 	if (amdgpu_sriov_vf(psp->adev))
1276 		return 0;
1277 
1278 	if (!psp->asd_context.initialized)
1279 		return 0;
1280 
1281 	ret = psp_ta_unload(psp, &psp->asd_context);
1282 	if (!ret)
1283 		psp->asd_context.initialized = false;
1284 
1285 	return ret;
1286 }
1287 
1288 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1289 		uint32_t id, uint32_t value)
1290 {
1291 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1292 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1293 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1294 }
1295 
1296 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1297 		uint32_t value)
1298 {
1299 	struct psp_gfx_cmd_resp *cmd;
1300 	int ret = 0;
1301 
1302 	if (reg >= PSP_REG_LAST)
1303 		return -EINVAL;
1304 
1305 	cmd = acquire_psp_cmd_buf(psp);
1306 
1307 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1308 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1309 	if (ret)
1310 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1311 
1312 	release_psp_cmd_buf(psp);
1313 
1314 	return ret;
1315 }
1316 
1317 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1318 				     uint64_t ta_bin_mc,
1319 				     struct ta_context *context)
1320 {
1321 	cmd->cmd_id				= context->ta_load_type;
1322 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1323 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1324 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1325 
1326 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1327 		lower_32_bits(context->mem_context.shared_mc_addr);
1328 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1329 		upper_32_bits(context->mem_context.shared_mc_addr);
1330 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1331 }
1332 
1333 int psp_ta_init_shared_buf(struct psp_context *psp,
1334 				  struct ta_mem_context *mem_ctx)
1335 {
1336 	/*
1337 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1338 	 * physical) for ta to host memory
1339 	 */
1340 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1341 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1342 				      AMDGPU_GEM_DOMAIN_GTT,
1343 				      &mem_ctx->shared_bo,
1344 				      &mem_ctx->shared_mc_addr,
1345 				      &mem_ctx->shared_buf);
1346 }
1347 
1348 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1349 				       uint32_t ta_cmd_id,
1350 				       uint32_t session_id)
1351 {
1352 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1353 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1354 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1355 }
1356 
1357 int psp_ta_invoke(struct psp_context *psp,
1358 		  uint32_t ta_cmd_id,
1359 		  struct ta_context *context)
1360 {
1361 	int ret;
1362 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1363 
1364 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1365 
1366 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1367 				 psp->fence_buf_mc_addr);
1368 
1369 	context->resp_status = cmd->resp.status;
1370 
1371 	release_psp_cmd_buf(psp);
1372 
1373 	return ret;
1374 }
1375 
1376 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1377 {
1378 	int ret;
1379 	struct psp_gfx_cmd_resp *cmd;
1380 
1381 	cmd = acquire_psp_cmd_buf(psp);
1382 
1383 	psp_copy_fw(psp, context->bin_desc.start_addr,
1384 		    context->bin_desc.size_bytes);
1385 
1386 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
1387 		context->mem_context.shared_bo)
1388 		context->mem_context.shared_mc_addr =
1389 			amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
1390 
1391 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1392 
1393 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1394 				 psp->fence_buf_mc_addr);
1395 
1396 	context->resp_status = cmd->resp.status;
1397 
1398 	if (!ret)
1399 		context->session_id = cmd->resp.session_id;
1400 
1401 	release_psp_cmd_buf(psp);
1402 
1403 	return ret;
1404 }
1405 
1406 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1407 {
1408 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1409 }
1410 
1411 int psp_xgmi_terminate(struct psp_context *psp)
1412 {
1413 	int ret;
1414 	struct amdgpu_device *adev = psp->adev;
1415 
1416 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1417 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1418 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1419 	     adev->gmc.xgmi.connected_to_cpu))
1420 		return 0;
1421 
1422 	if (!psp->xgmi_context.context.initialized)
1423 		return 0;
1424 
1425 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1426 
1427 	psp->xgmi_context.context.initialized = false;
1428 
1429 	return ret;
1430 }
1431 
1432 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1433 {
1434 	struct ta_xgmi_shared_memory *xgmi_cmd;
1435 	int ret;
1436 
1437 	if (!psp->ta_fw ||
1438 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1439 	    !psp->xgmi_context.context.bin_desc.start_addr)
1440 		return -ENOENT;
1441 
1442 	if (!load_ta)
1443 		goto invoke;
1444 
1445 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1446 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1447 
1448 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1449 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1450 		if (ret)
1451 			return ret;
1452 	}
1453 
1454 	/* Load XGMI TA */
1455 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1456 	if (!ret)
1457 		psp->xgmi_context.context.initialized = true;
1458 	else
1459 		return ret;
1460 
1461 invoke:
1462 	/* Initialize XGMI session */
1463 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1464 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1465 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1466 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1467 
1468 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1469 	/* note down the capbility flag for XGMI TA */
1470 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1471 
1472 	return ret;
1473 }
1474 
1475 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1476 {
1477 	struct ta_xgmi_shared_memory *xgmi_cmd;
1478 	int ret;
1479 
1480 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1481 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1482 
1483 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1484 
1485 	/* Invoke xgmi ta to get hive id */
1486 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1487 	if (ret)
1488 		return ret;
1489 
1490 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1491 
1492 	return 0;
1493 }
1494 
1495 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1496 {
1497 	struct ta_xgmi_shared_memory *xgmi_cmd;
1498 	int ret;
1499 
1500 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1501 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1502 
1503 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1504 
1505 	/* Invoke xgmi ta to get the node id */
1506 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1507 	if (ret)
1508 		return ret;
1509 
1510 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1511 
1512 	return 0;
1513 }
1514 
1515 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1516 {
1517 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1518 			IP_VERSION(13, 0, 2) &&
1519 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1520 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1521 		       IP_VERSION(13, 0, 6);
1522 }
1523 
1524 /*
1525  * Chips that support extended topology information require the driver to
1526  * reflect topology information in the opposite direction.  This is
1527  * because the TA has already exceeded its link record limit and if the
1528  * TA holds bi-directional information, the driver would have to do
1529  * multiple fetches instead of just two.
1530  */
1531 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1532 					struct psp_xgmi_node_info node_info)
1533 {
1534 	struct amdgpu_device *mirror_adev;
1535 	struct amdgpu_hive_info *hive;
1536 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1537 	uint64_t dst_node_id = node_info.node_id;
1538 	uint8_t dst_num_hops = node_info.num_hops;
1539 	uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled;
1540 	uint8_t dst_num_links = node_info.num_links;
1541 
1542 	hive = amdgpu_get_xgmi_hive(psp->adev);
1543 	if (WARN_ON(!hive))
1544 		return;
1545 
1546 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1547 		struct psp_xgmi_topology_info *mirror_top_info;
1548 		int j;
1549 
1550 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1551 			continue;
1552 
1553 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1554 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1555 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1556 				continue;
1557 
1558 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1559 			mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled;
1560 			/* prevent 0 num_links value re-reflection since reflection
1561 			 * criteria is based on num_hops (direct or indirect).
1562 			 */
1563 			if (dst_num_links) {
1564 				mirror_top_info->nodes[j].num_links = dst_num_links;
1565 				/* swap src and dst due to frame of reference */
1566 				for (int k = 0; k < dst_num_links; k++) {
1567 					mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num =
1568 						node_info.port_num[k].dst_xgmi_port_num;
1569 					mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num =
1570 						node_info.port_num[k].src_xgmi_port_num;
1571 				}
1572 			}
1573 
1574 			break;
1575 		}
1576 
1577 		break;
1578 	}
1579 
1580 	amdgpu_put_xgmi_hive(hive);
1581 }
1582 
1583 int psp_xgmi_get_topology_info(struct psp_context *psp,
1584 			       int number_devices,
1585 			       struct psp_xgmi_topology_info *topology,
1586 			       bool get_extended_data)
1587 {
1588 	struct ta_xgmi_shared_memory *xgmi_cmd;
1589 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1590 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1591 	int i;
1592 	int ret;
1593 
1594 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1595 		return -EINVAL;
1596 
1597 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1598 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1599 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1600 
1601 	/* Fill in the shared memory with topology information as input */
1602 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1603 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1604 	topology_info_input->num_nodes = number_devices;
1605 
1606 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1607 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1608 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1609 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1610 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1611 	}
1612 
1613 	/* Invoke xgmi ta to get the topology information */
1614 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1615 	if (ret)
1616 		return ret;
1617 
1618 	/* Read the output topology information from the shared memory */
1619 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1620 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1621 	for (i = 0; i < topology->num_nodes; i++) {
1622 		/* extended data will either be 0 or equal to non-extended data */
1623 		if (topology_info_output->nodes[i].num_hops)
1624 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1625 
1626 		/* non-extended data gets everything here so no need to update */
1627 		if (!get_extended_data) {
1628 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1629 			topology->nodes[i].is_sharing_enabled =
1630 					topology_info_output->nodes[i].is_sharing_enabled;
1631 			topology->nodes[i].sdma_engine =
1632 					topology_info_output->nodes[i].sdma_engine;
1633 		}
1634 
1635 	}
1636 
1637 	/* Invoke xgmi ta again to get the link information */
1638 	if (psp_xgmi_peer_link_info_supported(psp)) {
1639 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1640 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1641 		bool requires_reflection =
1642 			(psp->xgmi_context.supports_extended_data &&
1643 			 get_extended_data) ||
1644 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1645 				IP_VERSION(13, 0, 6) ||
1646 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1647 				IP_VERSION(13, 0, 14) ||
1648 			amdgpu_sriov_vf(psp->adev);
1649 		bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG ||
1650 			amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev);
1651 
1652 		/* popluate the shared output buffer rather than the cmd input buffer
1653 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1654 		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
1655 		 * The same requirement for GET_EXTEND_PEER_LINKS command.
1656 		 */
1657 		if (ta_port_num_support) {
1658 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1659 
1660 			for (i = 0; i < topology->num_nodes; i++)
1661 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1662 
1663 			link_extend_info_output->num_nodes = topology->num_nodes;
1664 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1665 		} else {
1666 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1667 
1668 			for (i = 0; i < topology->num_nodes; i++)
1669 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1670 
1671 			link_info_output->num_nodes = topology->num_nodes;
1672 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1673 		}
1674 
1675 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1676 		if (ret)
1677 			return ret;
1678 
1679 		for (i = 0; i < topology->num_nodes; i++) {
1680 			uint8_t node_num_links = ta_port_num_support ?
1681 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1682 			/* accumulate num_links on extended data */
1683 			if (get_extended_data) {
1684 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1685 			} else {
1686 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1687 								topology->nodes[i].num_links : node_num_links;
1688 			}
1689 			/* popluate the connected port num info if supported and available */
1690 			if (ta_port_num_support && topology->nodes[i].num_links) {
1691 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1692 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1693 			}
1694 
1695 			/* reflect the topology information for bi-directionality */
1696 			if (requires_reflection && topology->nodes[i].num_hops)
1697 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1698 		}
1699 	}
1700 
1701 	return 0;
1702 }
1703 
1704 int psp_xgmi_set_topology_info(struct psp_context *psp,
1705 			       int number_devices,
1706 			       struct psp_xgmi_topology_info *topology)
1707 {
1708 	struct ta_xgmi_shared_memory *xgmi_cmd;
1709 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1710 	int i;
1711 
1712 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1713 		return -EINVAL;
1714 
1715 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1716 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1717 
1718 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1719 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1720 	topology_info_input->num_nodes = number_devices;
1721 
1722 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1723 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1724 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1725 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1726 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1727 	}
1728 
1729 	/* Invoke xgmi ta to set topology information */
1730 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1731 }
1732 
1733 // ras begin
1734 static void psp_ras_ta_check_status(struct psp_context *psp)
1735 {
1736 	struct ta_ras_shared_memory *ras_cmd =
1737 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1738 
1739 	switch (ras_cmd->ras_status) {
1740 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1741 		dev_warn(psp->adev->dev,
1742 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1743 		break;
1744 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1745 		dev_warn(psp->adev->dev,
1746 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1747 		break;
1748 	case TA_RAS_STATUS__SUCCESS:
1749 		break;
1750 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1751 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1752 			dev_warn(psp->adev->dev,
1753 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1754 		break;
1755 	default:
1756 		dev_warn(psp->adev->dev,
1757 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1758 		break;
1759 	}
1760 }
1761 
1762 static int psp_ras_send_cmd(struct psp_context *psp,
1763 		enum ras_command cmd_id, void *in, void *out)
1764 {
1765 	struct ta_ras_shared_memory *ras_cmd;
1766 	uint32_t cmd = cmd_id;
1767 	int ret = 0;
1768 
1769 	if (!in)
1770 		return -EINVAL;
1771 
1772 	mutex_lock(&psp->ras_context.mutex);
1773 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1774 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1775 
1776 	switch (cmd) {
1777 	case TA_RAS_COMMAND__ENABLE_FEATURES:
1778 	case TA_RAS_COMMAND__DISABLE_FEATURES:
1779 		memcpy(&ras_cmd->ras_in_message,
1780 			in, sizeof(ras_cmd->ras_in_message));
1781 		break;
1782 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1783 		memcpy(&ras_cmd->ras_in_message.trigger_error,
1784 			in, sizeof(ras_cmd->ras_in_message.trigger_error));
1785 		break;
1786 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1787 		memcpy(&ras_cmd->ras_in_message.address,
1788 			in, sizeof(ras_cmd->ras_in_message.address));
1789 		break;
1790 	default:
1791 		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1792 		ret = -EINVAL;
1793 		goto err_out;
1794 	}
1795 
1796 	ras_cmd->cmd_id = cmd;
1797 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1798 
1799 	switch (cmd) {
1800 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1801 		if (!ret && out)
1802 			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1803 		break;
1804 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1805 		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1806 			ret = -EINVAL;
1807 		else if (out)
1808 			memcpy(out,
1809 				&ras_cmd->ras_out_message.address,
1810 				sizeof(ras_cmd->ras_out_message.address));
1811 		break;
1812 	default:
1813 		break;
1814 	}
1815 
1816 err_out:
1817 	mutex_unlock(&psp->ras_context.mutex);
1818 
1819 	return ret;
1820 }
1821 
1822 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1823 {
1824 	struct ta_ras_shared_memory *ras_cmd;
1825 	int ret;
1826 
1827 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1828 
1829 	/*
1830 	 * TODO: bypass the loading in sriov for now
1831 	 */
1832 	if (amdgpu_sriov_vf(psp->adev))
1833 		return 0;
1834 
1835 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1836 
1837 	if (amdgpu_ras_intr_triggered())
1838 		return ret;
1839 
1840 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1841 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1842 		return -EINVAL;
1843 	}
1844 
1845 	if (!ret) {
1846 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1847 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1848 
1849 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1850 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1851 			dev_warn(psp->adev->dev,
1852 				 "RAS internal register access blocked\n");
1853 
1854 		psp_ras_ta_check_status(psp);
1855 	}
1856 
1857 	return ret;
1858 }
1859 
1860 int psp_ras_enable_features(struct psp_context *psp,
1861 		union ta_ras_cmd_input *info, bool enable)
1862 {
1863 	enum ras_command cmd_id;
1864 	int ret;
1865 
1866 	if (!psp->ras_context.context.initialized || !info)
1867 		return -EINVAL;
1868 
1869 	cmd_id = enable ?
1870 		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1871 	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1872 	if (ret)
1873 		return -EINVAL;
1874 
1875 	return 0;
1876 }
1877 
1878 int psp_ras_terminate(struct psp_context *psp)
1879 {
1880 	int ret;
1881 
1882 	/*
1883 	 * TODO: bypass the terminate in sriov for now
1884 	 */
1885 	if (amdgpu_sriov_vf(psp->adev))
1886 		return 0;
1887 
1888 	if (!psp->ras_context.context.initialized)
1889 		return 0;
1890 
1891 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1892 
1893 	psp->ras_context.context.initialized = false;
1894 
1895 	mutex_destroy(&psp->ras_context.mutex);
1896 
1897 	return ret;
1898 }
1899 
1900 int psp_ras_initialize(struct psp_context *psp)
1901 {
1902 	int ret;
1903 	uint32_t boot_cfg = 0xFF;
1904 	struct amdgpu_device *adev = psp->adev;
1905 	struct ta_ras_shared_memory *ras_cmd;
1906 
1907 	/*
1908 	 * TODO: bypass the initialize in sriov for now
1909 	 */
1910 	if (amdgpu_sriov_vf(adev))
1911 		return 0;
1912 
1913 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1914 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1915 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1916 		return 0;
1917 	}
1918 
1919 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1920 		/* query GECC enablement status from boot config
1921 		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1922 		 */
1923 		ret = psp_boot_config_get(adev, &boot_cfg);
1924 		if (ret)
1925 			dev_warn(adev->dev, "PSP get boot config failed\n");
1926 
1927 		if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
1928 		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1929 			dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
1930 			dev_warn(adev->dev,
1931 				"To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
1932 		} else {
1933 			if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
1934 				amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1935 				if (boot_cfg == 1) {
1936 					dev_info(adev->dev, "GECC is enabled\n");
1937 				} else {
1938 					/* enable GECC in next boot cycle if it is disabled
1939 					 * in boot config, or force enable GECC if failed to
1940 					 * get boot configuration
1941 					 */
1942 					ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1943 					if (ret)
1944 						dev_warn(adev->dev, "PSP set boot config failed\n");
1945 					else
1946 						dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1947 				}
1948 			} else {
1949 				if (!boot_cfg) {
1950 					if (!adev->ras_default_ecc_enabled &&
1951 					    amdgpu_ras_enable != 1 &&
1952 					    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1953 						dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
1954 					else
1955 						dev_info(adev->dev, "GECC is disabled\n");
1956 				} else {
1957 					/* disable GECC in next boot cycle if ras is
1958 					 * disabled by module parameter amdgpu_ras_enable
1959 					 * and/or amdgpu_ras_mask, or boot_config_get call
1960 					 * is failed
1961 					 */
1962 					ret = psp_boot_config_set(adev, 0);
1963 					if (ret)
1964 						dev_warn(adev->dev, "PSP set boot config failed\n");
1965 					else
1966 						dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1967 				}
1968 			}
1969 		}
1970 	}
1971 
1972 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1973 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1974 
1975 	if (!psp->ras_context.context.mem_context.shared_buf) {
1976 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1977 		if (ret)
1978 			return ret;
1979 	}
1980 
1981 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1982 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1983 
1984 	if (amdgpu_ras_is_poison_mode_supported(adev))
1985 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1986 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1987 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1988 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1989 		adev->gfx.xcc_mask;
1990 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1991 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1992 		ras_cmd->ras_in_message.init_flags.nps_mode =
1993 			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1994 	ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
1995 	ras_cmd->ras_in_message.init_flags.vram_type = (uint8_t)adev->gmc.vram_type;
1996 
1997 	ret = psp_ta_load(psp, &psp->ras_context.context);
1998 
1999 	if (!ret && !ras_cmd->ras_status) {
2000 		psp->ras_context.context.initialized = true;
2001 		mutex_init(&psp->ras_context.mutex);
2002 	} else {
2003 		if (ras_cmd->ras_status)
2004 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
2005 
2006 		/* fail to load RAS TA */
2007 		psp->ras_context.context.initialized = false;
2008 	}
2009 
2010 	return ret;
2011 }
2012 
2013 int psp_ras_trigger_error(struct psp_context *psp,
2014 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
2015 {
2016 	struct amdgpu_device *adev = psp->adev;
2017 	int ret;
2018 	uint32_t dev_mask;
2019 	uint32_t ras_status = 0;
2020 
2021 	if (!psp->ras_context.context.initialized || !info)
2022 		return -EINVAL;
2023 
2024 	switch (info->block_id) {
2025 	case TA_RAS_BLOCK__GFX:
2026 		dev_mask = GET_MASK(GC, instance_mask);
2027 		break;
2028 	case TA_RAS_BLOCK__SDMA:
2029 		dev_mask = GET_MASK(SDMA0, instance_mask);
2030 		break;
2031 	case TA_RAS_BLOCK__VCN:
2032 	case TA_RAS_BLOCK__JPEG:
2033 		dev_mask = GET_MASK(VCN, instance_mask);
2034 		break;
2035 	default:
2036 		dev_mask = instance_mask;
2037 		break;
2038 	}
2039 
2040 	/* reuse sub_block_index for backward compatibility */
2041 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
2042 	dev_mask &= AMDGPU_RAS_INST_MASK;
2043 	info->sub_block_index |= dev_mask;
2044 
2045 	ret = psp_ras_send_cmd(psp,
2046 			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
2047 	if (ret)
2048 		return -EINVAL;
2049 
2050 	/* If err_event_athub occurs error inject was successful, however
2051 	 *  return status from TA is no long reliable
2052 	 */
2053 	if (amdgpu_ras_intr_triggered())
2054 		return 0;
2055 
2056 	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
2057 		return -EACCES;
2058 	else if (ras_status)
2059 		return -EINVAL;
2060 
2061 	return 0;
2062 }
2063 
2064 int psp_ras_query_address(struct psp_context *psp,
2065 			  struct ta_ras_query_address_input *addr_in,
2066 			  struct ta_ras_query_address_output *addr_out)
2067 {
2068 	int ret;
2069 
2070 	if (!psp->ras_context.context.initialized ||
2071 		!addr_in || !addr_out)
2072 		return -EINVAL;
2073 
2074 	ret = psp_ras_send_cmd(psp,
2075 			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
2076 
2077 	return ret;
2078 }
2079 // ras end
2080 
2081 // HDCP start
2082 static int psp_hdcp_initialize(struct psp_context *psp)
2083 {
2084 	int ret;
2085 
2086 	/*
2087 	 * TODO: bypass the initialize in sriov for now
2088 	 */
2089 	if (amdgpu_sriov_vf(psp->adev))
2090 		return 0;
2091 
2092 	/* bypass hdcp initialization if dmu is harvested */
2093 	if (!amdgpu_device_has_display_hardware(psp->adev))
2094 		return 0;
2095 
2096 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
2097 	    !psp->hdcp_context.context.bin_desc.start_addr) {
2098 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
2099 		return 0;
2100 	}
2101 
2102 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
2103 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2104 
2105 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
2106 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
2107 		if (ret)
2108 			return ret;
2109 	}
2110 
2111 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
2112 	if (!ret) {
2113 		psp->hdcp_context.context.initialized = true;
2114 		mutex_init(&psp->hdcp_context.mutex);
2115 	}
2116 
2117 	return ret;
2118 }
2119 
2120 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2121 {
2122 	/*
2123 	 * TODO: bypass the loading in sriov for now
2124 	 */
2125 	if (amdgpu_sriov_vf(psp->adev))
2126 		return 0;
2127 
2128 	if (!psp->hdcp_context.context.initialized)
2129 		return 0;
2130 
2131 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
2132 }
2133 
2134 static int psp_hdcp_terminate(struct psp_context *psp)
2135 {
2136 	int ret;
2137 
2138 	/*
2139 	 * TODO: bypass the terminate in sriov for now
2140 	 */
2141 	if (amdgpu_sriov_vf(psp->adev))
2142 		return 0;
2143 
2144 	if (!psp->hdcp_context.context.initialized)
2145 		return 0;
2146 
2147 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2148 
2149 	psp->hdcp_context.context.initialized = false;
2150 
2151 	return ret;
2152 }
2153 // HDCP end
2154 
2155 // DTM start
2156 static int psp_dtm_initialize(struct psp_context *psp)
2157 {
2158 	int ret;
2159 
2160 	/*
2161 	 * TODO: bypass the initialize in sriov for now
2162 	 */
2163 	if (amdgpu_sriov_vf(psp->adev))
2164 		return 0;
2165 
2166 	/* bypass dtm initialization if dmu is harvested */
2167 	if (!amdgpu_device_has_display_hardware(psp->adev))
2168 		return 0;
2169 
2170 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
2171 	    !psp->dtm_context.context.bin_desc.start_addr) {
2172 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2173 		return 0;
2174 	}
2175 
2176 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2177 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2178 
2179 	if (!psp->dtm_context.context.mem_context.shared_buf) {
2180 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2181 		if (ret)
2182 			return ret;
2183 	}
2184 
2185 	ret = psp_ta_load(psp, &psp->dtm_context.context);
2186 	if (!ret) {
2187 		psp->dtm_context.context.initialized = true;
2188 		mutex_init(&psp->dtm_context.mutex);
2189 	}
2190 
2191 	return ret;
2192 }
2193 
2194 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2195 {
2196 	/*
2197 	 * TODO: bypass the loading in sriov for now
2198 	 */
2199 	if (amdgpu_sriov_vf(psp->adev))
2200 		return 0;
2201 
2202 	if (!psp->dtm_context.context.initialized)
2203 		return 0;
2204 
2205 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2206 }
2207 
2208 static int psp_dtm_terminate(struct psp_context *psp)
2209 {
2210 	int ret;
2211 
2212 	/*
2213 	 * TODO: bypass the terminate in sriov for now
2214 	 */
2215 	if (amdgpu_sriov_vf(psp->adev))
2216 		return 0;
2217 
2218 	if (!psp->dtm_context.context.initialized)
2219 		return 0;
2220 
2221 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
2222 
2223 	psp->dtm_context.context.initialized = false;
2224 
2225 	return ret;
2226 }
2227 // DTM end
2228 
2229 // RAP start
2230 static int psp_rap_initialize(struct psp_context *psp)
2231 {
2232 	int ret;
2233 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2234 
2235 	/*
2236 	 * TODO: bypass the initialize in sriov for now
2237 	 */
2238 	if (amdgpu_sriov_vf(psp->adev))
2239 		return 0;
2240 
2241 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2242 	    !psp->rap_context.context.bin_desc.start_addr) {
2243 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2244 		return 0;
2245 	}
2246 
2247 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2248 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2249 
2250 	if (!psp->rap_context.context.mem_context.shared_buf) {
2251 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2252 		if (ret)
2253 			return ret;
2254 	}
2255 
2256 	ret = psp_ta_load(psp, &psp->rap_context.context);
2257 	if (!ret) {
2258 		psp->rap_context.context.initialized = true;
2259 		mutex_init(&psp->rap_context.mutex);
2260 	} else
2261 		return ret;
2262 
2263 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2264 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2265 		psp_rap_terminate(psp);
2266 		/* free rap shared memory */
2267 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2268 
2269 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2270 			 ret, status);
2271 
2272 		return ret;
2273 	}
2274 
2275 	return 0;
2276 }
2277 
2278 static int psp_rap_terminate(struct psp_context *psp)
2279 {
2280 	int ret;
2281 
2282 	if (!psp->rap_context.context.initialized)
2283 		return 0;
2284 
2285 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2286 
2287 	psp->rap_context.context.initialized = false;
2288 
2289 	return ret;
2290 }
2291 
2292 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2293 {
2294 	struct ta_rap_shared_memory *rap_cmd;
2295 	int ret = 0;
2296 
2297 	if (!psp->rap_context.context.initialized)
2298 		return 0;
2299 
2300 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2301 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2302 		return -EINVAL;
2303 
2304 	mutex_lock(&psp->rap_context.mutex);
2305 
2306 	rap_cmd = (struct ta_rap_shared_memory *)
2307 		  psp->rap_context.context.mem_context.shared_buf;
2308 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2309 
2310 	rap_cmd->cmd_id = ta_cmd_id;
2311 	rap_cmd->validation_method_id = METHOD_A;
2312 
2313 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2314 	if (ret)
2315 		goto out_unlock;
2316 
2317 	if (status)
2318 		*status = rap_cmd->rap_status;
2319 
2320 out_unlock:
2321 	mutex_unlock(&psp->rap_context.mutex);
2322 
2323 	return ret;
2324 }
2325 // RAP end
2326 
2327 /* securedisplay start */
2328 static int psp_securedisplay_initialize(struct psp_context *psp)
2329 {
2330 	int ret;
2331 	struct ta_securedisplay_cmd *securedisplay_cmd;
2332 
2333 	/*
2334 	 * TODO: bypass the initialize in sriov for now
2335 	 */
2336 	if (amdgpu_sriov_vf(psp->adev))
2337 		return 0;
2338 
2339 	/* bypass securedisplay initialization if dmu is harvested */
2340 	if (!amdgpu_device_has_display_hardware(psp->adev))
2341 		return 0;
2342 
2343 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2344 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2345 		dev_info(psp->adev->dev,
2346 			 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
2347 		return 0;
2348 	}
2349 
2350 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2351 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2352 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2353 
2354 	if (!psp->securedisplay_context.context.initialized) {
2355 		ret = psp_ta_init_shared_buf(psp,
2356 					     &psp->securedisplay_context.context.mem_context);
2357 		if (ret)
2358 			return ret;
2359 	}
2360 
2361 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2362 	if (!ret && !psp->securedisplay_context.context.resp_status) {
2363 		psp->securedisplay_context.context.initialized = true;
2364 		mutex_init(&psp->securedisplay_context.mutex);
2365 	} else {
2366 		/* don't try again */
2367 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2368 		return ret;
2369 	}
2370 
2371 	mutex_lock(&psp->securedisplay_context.mutex);
2372 
2373 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2374 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2375 
2376 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2377 
2378 	mutex_unlock(&psp->securedisplay_context.mutex);
2379 
2380 	if (ret) {
2381 		psp_securedisplay_terminate(psp);
2382 		/* free securedisplay shared memory */
2383 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2384 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2385 		return -EINVAL;
2386 	}
2387 
2388 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2389 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2390 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2391 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2392 		/* don't try again */
2393 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2394 	}
2395 
2396 	return 0;
2397 }
2398 
2399 static int psp_securedisplay_terminate(struct psp_context *psp)
2400 {
2401 	int ret;
2402 
2403 	/*
2404 	 * TODO:bypass the terminate in sriov for now
2405 	 */
2406 	if (amdgpu_sriov_vf(psp->adev))
2407 		return 0;
2408 
2409 	if (!psp->securedisplay_context.context.initialized)
2410 		return 0;
2411 
2412 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2413 
2414 	psp->securedisplay_context.context.initialized = false;
2415 
2416 	return ret;
2417 }
2418 
2419 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2420 {
2421 	int ret;
2422 
2423 	if (!psp->securedisplay_context.context.initialized)
2424 		return -EINVAL;
2425 
2426 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2427 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2428 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2429 		return -EINVAL;
2430 
2431 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2432 
2433 	return ret;
2434 }
2435 /* SECUREDISPLAY end */
2436 
2437 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2438 {
2439 	struct psp_context *psp = &adev->psp;
2440 	int ret = 0;
2441 
2442 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2443 		ret = psp->funcs->wait_for_bootloader(psp);
2444 
2445 	return ret;
2446 }
2447 
2448 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2449 {
2450 	if (psp->funcs &&
2451 	    psp->funcs->get_ras_capability) {
2452 		return psp->funcs->get_ras_capability(psp);
2453 	} else {
2454 		return false;
2455 	}
2456 }
2457 
2458 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2459 {
2460 	struct psp_context *psp = &adev->psp;
2461 
2462 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2463 		return false;
2464 
2465 	if (psp->funcs && psp->funcs->is_reload_needed)
2466 		return psp->funcs->is_reload_needed(psp);
2467 
2468 	return false;
2469 }
2470 
2471 static void psp_update_gpu_addresses(struct amdgpu_device *adev)
2472 {
2473 	struct psp_context *psp = &adev->psp;
2474 
2475 	if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
2476 		psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
2477 		psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
2478 		psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
2479 	}
2480 	if (adev->firmware.rbuf && psp->km_ring.ring_mem)
2481 		psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
2482 }
2483 
2484 static int psp_hw_start(struct psp_context *psp)
2485 {
2486 	struct amdgpu_device *adev = psp->adev;
2487 	int ret;
2488 
2489 	if (amdgpu_virt_xgmi_migrate_enabled(adev))
2490 		psp_update_gpu_addresses(adev);
2491 
2492 	if (!amdgpu_sriov_vf(adev)) {
2493 		if ((is_psp_fw_valid(psp->kdb)) &&
2494 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2495 			ret = psp_bootloader_load_kdb(psp);
2496 			if (ret) {
2497 				dev_err(adev->dev, "PSP load kdb failed!\n");
2498 				return ret;
2499 			}
2500 		}
2501 
2502 		if ((is_psp_fw_valid(psp->spl)) &&
2503 		    (psp->funcs->bootloader_load_spl != NULL)) {
2504 			ret = psp_bootloader_load_spl(psp);
2505 			if (ret) {
2506 				dev_err(adev->dev, "PSP load spl failed!\n");
2507 				return ret;
2508 			}
2509 		}
2510 
2511 		if ((is_psp_fw_valid(psp->sys)) &&
2512 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2513 			ret = psp_bootloader_load_sysdrv(psp);
2514 			if (ret) {
2515 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2516 				return ret;
2517 			}
2518 		}
2519 
2520 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2521 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2522 			ret = psp_bootloader_load_soc_drv(psp);
2523 			if (ret) {
2524 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2525 				return ret;
2526 			}
2527 		}
2528 
2529 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2530 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2531 			ret = psp_bootloader_load_intf_drv(psp);
2532 			if (ret) {
2533 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2534 				return ret;
2535 			}
2536 		}
2537 
2538 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2539 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2540 			ret = psp_bootloader_load_dbg_drv(psp);
2541 			if (ret) {
2542 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2543 				return ret;
2544 			}
2545 		}
2546 
2547 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2548 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2549 			ret = psp_bootloader_load_ras_drv(psp);
2550 			if (ret) {
2551 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2552 				return ret;
2553 			}
2554 		}
2555 
2556 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2557 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2558 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2559 			if (ret) {
2560 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2561 				return ret;
2562 			}
2563 		}
2564 
2565 		if ((is_psp_fw_valid(psp->spdm_drv)) &&
2566 		    (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2567 			ret = psp_bootloader_load_spdm_drv(psp);
2568 			if (ret) {
2569 				dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2570 				return ret;
2571 			}
2572 		}
2573 
2574 		if ((is_psp_fw_valid(psp->sos)) &&
2575 		    (psp->funcs->bootloader_load_sos != NULL)) {
2576 			ret = psp_bootloader_load_sos(psp);
2577 			if (ret) {
2578 				dev_err(adev->dev, "PSP load sos failed!\n");
2579 				return ret;
2580 			}
2581 		}
2582 	}
2583 
2584 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2585 	if (ret) {
2586 		dev_err(adev->dev, "PSP create ring failed!\n");
2587 		return ret;
2588 	}
2589 
2590 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2591 		ret = psp_update_fw_reservation(psp);
2592 		if (ret) {
2593 			dev_err(adev->dev, "update fw reservation failed!\n");
2594 			return ret;
2595 		}
2596 	}
2597 
2598 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2599 		goto skip_pin_bo;
2600 
2601 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2602 		ret = psp_tmr_init(psp);
2603 		if (ret) {
2604 			dev_err(adev->dev, "PSP tmr init failed!\n");
2605 			return ret;
2606 		}
2607 	}
2608 
2609 skip_pin_bo:
2610 	/*
2611 	 * For ASICs with DF Cstate management centralized
2612 	 * to PMFW, TMR setup should be performed after PMFW
2613 	 * loaded and before other non-psp firmware loaded.
2614 	 */
2615 	if (psp->pmfw_centralized_cstate_management) {
2616 		ret = psp_load_smu_fw(psp);
2617 		if (ret)
2618 			return ret;
2619 	}
2620 
2621 	ret = psp_tmr_load(psp);
2622 	if (ret) {
2623 		dev_err(adev->dev, "PSP load tmr failed!\n");
2624 		return ret;
2625 	}
2626 
2627 	return 0;
2628 }
2629 
2630 int amdgpu_psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2631 			   enum psp_gfx_fw_type *type)
2632 {
2633 	switch (ucode->ucode_id) {
2634 	case AMDGPU_UCODE_ID_CAP:
2635 		*type = GFX_FW_TYPE_CAP;
2636 		break;
2637 	case AMDGPU_UCODE_ID_SDMA0:
2638 		*type = GFX_FW_TYPE_SDMA0;
2639 		break;
2640 	case AMDGPU_UCODE_ID_SDMA1:
2641 		*type = GFX_FW_TYPE_SDMA1;
2642 		break;
2643 	case AMDGPU_UCODE_ID_SDMA2:
2644 		*type = GFX_FW_TYPE_SDMA2;
2645 		break;
2646 	case AMDGPU_UCODE_ID_SDMA3:
2647 		*type = GFX_FW_TYPE_SDMA3;
2648 		break;
2649 	case AMDGPU_UCODE_ID_SDMA4:
2650 		*type = GFX_FW_TYPE_SDMA4;
2651 		break;
2652 	case AMDGPU_UCODE_ID_SDMA5:
2653 		*type = GFX_FW_TYPE_SDMA5;
2654 		break;
2655 	case AMDGPU_UCODE_ID_SDMA6:
2656 		*type = GFX_FW_TYPE_SDMA6;
2657 		break;
2658 	case AMDGPU_UCODE_ID_SDMA7:
2659 		*type = GFX_FW_TYPE_SDMA7;
2660 		break;
2661 	case AMDGPU_UCODE_ID_CP_MES:
2662 		*type = GFX_FW_TYPE_CP_MES;
2663 		break;
2664 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2665 		*type = GFX_FW_TYPE_MES_STACK;
2666 		break;
2667 	case AMDGPU_UCODE_ID_CP_MES1:
2668 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2669 		break;
2670 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2671 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2672 		break;
2673 	case AMDGPU_UCODE_ID_CP_CE:
2674 		*type = GFX_FW_TYPE_CP_CE;
2675 		break;
2676 	case AMDGPU_UCODE_ID_CP_PFP:
2677 		*type = GFX_FW_TYPE_CP_PFP;
2678 		break;
2679 	case AMDGPU_UCODE_ID_CP_ME:
2680 		*type = GFX_FW_TYPE_CP_ME;
2681 		break;
2682 	case AMDGPU_UCODE_ID_CP_MEC1:
2683 		*type = GFX_FW_TYPE_CP_MEC;
2684 		break;
2685 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2686 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2687 		break;
2688 	case AMDGPU_UCODE_ID_CP_MEC2:
2689 		*type = GFX_FW_TYPE_CP_MEC;
2690 		break;
2691 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2692 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2693 		break;
2694 	case AMDGPU_UCODE_ID_RLC_P:
2695 		*type = GFX_FW_TYPE_RLC_P;
2696 		break;
2697 	case AMDGPU_UCODE_ID_RLC_V:
2698 		*type = GFX_FW_TYPE_RLC_V;
2699 		break;
2700 	case AMDGPU_UCODE_ID_RLC_G:
2701 		*type = GFX_FW_TYPE_RLC_G;
2702 		break;
2703 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2704 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2705 		break;
2706 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2707 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2708 		break;
2709 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2710 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2711 		break;
2712 	case AMDGPU_UCODE_ID_RLC_IRAM:
2713 		*type = GFX_FW_TYPE_RLC_IRAM;
2714 		break;
2715 	case AMDGPU_UCODE_ID_RLC_DRAM:
2716 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2717 		break;
2718 	case AMDGPU_UCODE_ID_RLC_IRAM_1:
2719 		*type = GFX_FW_TYPE_RLX6_UCODE_CORE1;
2720 		break;
2721 	case AMDGPU_UCODE_ID_RLC_DRAM_1:
2722 		*type = GFX_FW_TYPE_RLX6_DRAM_BOOT_CORE1;
2723 		break;
2724 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2725 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2726 		break;
2727 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2728 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2729 		break;
2730 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2731 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2732 		break;
2733 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2734 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2735 		break;
2736 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2737 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2738 		break;
2739 	case AMDGPU_UCODE_ID_SMC:
2740 		*type = GFX_FW_TYPE_SMU;
2741 		break;
2742 	case AMDGPU_UCODE_ID_PPTABLE:
2743 		*type = GFX_FW_TYPE_PPTABLE;
2744 		break;
2745 	case AMDGPU_UCODE_ID_UVD:
2746 		*type = GFX_FW_TYPE_UVD;
2747 		break;
2748 	case AMDGPU_UCODE_ID_UVD1:
2749 		*type = GFX_FW_TYPE_UVD1;
2750 		break;
2751 	case AMDGPU_UCODE_ID_VCE:
2752 		*type = GFX_FW_TYPE_VCE;
2753 		break;
2754 	case AMDGPU_UCODE_ID_VCN:
2755 		*type = GFX_FW_TYPE_VCN;
2756 		break;
2757 	case AMDGPU_UCODE_ID_VCN1:
2758 		*type = GFX_FW_TYPE_VCN1;
2759 		break;
2760 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2761 		*type = GFX_FW_TYPE_DMCU_ERAM;
2762 		break;
2763 	case AMDGPU_UCODE_ID_DMCU_INTV:
2764 		*type = GFX_FW_TYPE_DMCU_ISR;
2765 		break;
2766 	case AMDGPU_UCODE_ID_VCN0_RAM:
2767 		*type = GFX_FW_TYPE_VCN0_RAM;
2768 		break;
2769 	case AMDGPU_UCODE_ID_VCN1_RAM:
2770 		*type = GFX_FW_TYPE_VCN1_RAM;
2771 		break;
2772 	case AMDGPU_UCODE_ID_DMCUB:
2773 		*type = GFX_FW_TYPE_DMUB;
2774 		break;
2775 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2776 	case AMDGPU_UCODE_ID_SDMA_RS64:
2777 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2778 		break;
2779 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2780 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2781 		break;
2782 	case AMDGPU_UCODE_ID_IMU_I:
2783 		*type = GFX_FW_TYPE_IMU_I;
2784 		break;
2785 	case AMDGPU_UCODE_ID_IMU_D:
2786 		*type = GFX_FW_TYPE_IMU_D;
2787 		break;
2788 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2789 		*type = GFX_FW_TYPE_RS64_PFP;
2790 		break;
2791 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2792 		*type = GFX_FW_TYPE_RS64_ME;
2793 		break;
2794 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2795 		*type = GFX_FW_TYPE_RS64_MEC;
2796 		break;
2797 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2798 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2799 		break;
2800 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2801 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2802 		break;
2803 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2804 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2805 		break;
2806 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2807 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2808 		break;
2809 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2810 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2811 		break;
2812 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2813 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2814 		break;
2815 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2816 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2817 		break;
2818 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2819 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2820 		break;
2821 	case AMDGPU_UCODE_ID_VPE_CTX:
2822 		*type = GFX_FW_TYPE_VPEC_FW1;
2823 		break;
2824 	case AMDGPU_UCODE_ID_VPE_CTL:
2825 		*type = GFX_FW_TYPE_VPEC_FW2;
2826 		break;
2827 	case AMDGPU_UCODE_ID_VPE:
2828 		*type = GFX_FW_TYPE_VPE;
2829 		break;
2830 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2831 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2832 		break;
2833 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2834 		*type = GFX_FW_TYPE_UMSCH_DATA;
2835 		break;
2836 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2837 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2838 		break;
2839 	case AMDGPU_UCODE_ID_P2S_TABLE:
2840 		*type = GFX_FW_TYPE_P2S_TABLE;
2841 		break;
2842 	case AMDGPU_UCODE_ID_JPEG_RAM:
2843 		*type = GFX_FW_TYPE_JPEG_RAM;
2844 		break;
2845 	case AMDGPU_UCODE_ID_ISP:
2846 		*type = GFX_FW_TYPE_ISP;
2847 		break;
2848 	case AMDGPU_UCODE_ID_MAXIMUM:
2849 	default:
2850 		return -EINVAL;
2851 	}
2852 
2853 	return 0;
2854 }
2855 
2856 static void psp_print_fw_hdr(struct psp_context *psp,
2857 			     struct amdgpu_firmware_info *ucode)
2858 {
2859 	struct amdgpu_device *adev = psp->adev;
2860 	struct common_firmware_header *hdr;
2861 
2862 	switch (ucode->ucode_id) {
2863 	case AMDGPU_UCODE_ID_SDMA0:
2864 	case AMDGPU_UCODE_ID_SDMA1:
2865 	case AMDGPU_UCODE_ID_SDMA2:
2866 	case AMDGPU_UCODE_ID_SDMA3:
2867 	case AMDGPU_UCODE_ID_SDMA4:
2868 	case AMDGPU_UCODE_ID_SDMA5:
2869 	case AMDGPU_UCODE_ID_SDMA6:
2870 	case AMDGPU_UCODE_ID_SDMA7:
2871 		hdr = (struct common_firmware_header *)
2872 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2873 		amdgpu_ucode_print_sdma_hdr(hdr);
2874 		break;
2875 	case AMDGPU_UCODE_ID_CP_CE:
2876 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2877 		amdgpu_ucode_print_gfx_hdr(hdr);
2878 		break;
2879 	case AMDGPU_UCODE_ID_CP_PFP:
2880 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2881 		amdgpu_ucode_print_gfx_hdr(hdr);
2882 		break;
2883 	case AMDGPU_UCODE_ID_CP_ME:
2884 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2885 		amdgpu_ucode_print_gfx_hdr(hdr);
2886 		break;
2887 	case AMDGPU_UCODE_ID_CP_MEC1:
2888 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2889 		amdgpu_ucode_print_gfx_hdr(hdr);
2890 		break;
2891 	case AMDGPU_UCODE_ID_RLC_G:
2892 	case AMDGPU_UCODE_ID_RLC_DRAM_1:
2893 	case AMDGPU_UCODE_ID_RLC_IRAM_1:
2894 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2895 		amdgpu_ucode_print_rlc_hdr(hdr);
2896 		break;
2897 	case AMDGPU_UCODE_ID_SMC:
2898 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2899 		amdgpu_ucode_print_smc_hdr(hdr);
2900 		break;
2901 	default:
2902 		break;
2903 	}
2904 }
2905 
2906 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2907 				       struct amdgpu_firmware_info *ucode,
2908 				       struct psp_gfx_cmd_resp *cmd)
2909 {
2910 	int ret;
2911 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2912 
2913 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2914 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2915 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2916 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2917 
2918 	ret = psp_get_fw_type(psp, ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2919 	if (ret)
2920 		dev_err(psp->adev->dev, "Unknown firmware type %d\n", ucode->ucode_id);
2921 	return ret;
2922 }
2923 
2924 int psp_execute_ip_fw_load(struct psp_context *psp,
2925 			   struct amdgpu_firmware_info *ucode)
2926 {
2927 	int ret = 0;
2928 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2929 
2930 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2931 	if (!ret) {
2932 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2933 					 psp->fence_buf_mc_addr);
2934 	}
2935 
2936 	release_psp_cmd_buf(psp);
2937 
2938 	return ret;
2939 }
2940 
2941 static int psp_load_p2s_table(struct psp_context *psp)
2942 {
2943 	int ret;
2944 	struct amdgpu_device *adev = psp->adev;
2945 	struct amdgpu_firmware_info *ucode =
2946 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2947 
2948 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2949 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2950 		return 0;
2951 
2952 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2953 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2954 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2955 								0x0036003C;
2956 		if (psp->sos.fw_version < supp_vers)
2957 			return 0;
2958 	}
2959 
2960 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2961 		return 0;
2962 
2963 	ret = psp_execute_ip_fw_load(psp, ucode);
2964 
2965 	return ret;
2966 }
2967 
2968 static int psp_load_smu_fw(struct psp_context *psp)
2969 {
2970 	int ret;
2971 	struct amdgpu_device *adev = psp->adev;
2972 	struct amdgpu_firmware_info *ucode =
2973 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2974 	struct amdgpu_ras *ras = psp->ras_context.ras;
2975 
2976 	/*
2977 	 * Skip SMU FW reloading in case of using BACO for runpm only,
2978 	 * as SMU is always alive.
2979 	 */
2980 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2981 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2982 		return 0;
2983 
2984 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2985 		return 0;
2986 
2987 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2988 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2989 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2990 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2991 		if (ret)
2992 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2993 	}
2994 
2995 	ret = psp_execute_ip_fw_load(psp, ucode);
2996 
2997 	if (ret)
2998 		dev_err(adev->dev, "PSP load smu failed!\n");
2999 
3000 	return ret;
3001 }
3002 
3003 static bool fw_load_skip_check(struct psp_context *psp,
3004 			       struct amdgpu_firmware_info *ucode)
3005 {
3006 	if (!ucode->fw || !ucode->ucode_size)
3007 		return true;
3008 
3009 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
3010 		return true;
3011 
3012 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3013 	    (psp_smu_reload_quirk(psp) ||
3014 	     psp->autoload_supported ||
3015 	     psp->pmfw_centralized_cstate_management))
3016 		return true;
3017 
3018 	if (amdgpu_sriov_vf(psp->adev) &&
3019 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
3020 		return true;
3021 
3022 	if (psp->autoload_supported &&
3023 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
3024 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
3025 		/* skip mec JT when autoload is enabled */
3026 		return true;
3027 
3028 	return false;
3029 }
3030 
3031 int psp_load_fw_list(struct psp_context *psp,
3032 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
3033 {
3034 	int ret = 0, i;
3035 	struct amdgpu_firmware_info *ucode;
3036 
3037 	for (i = 0; i < ucode_count; ++i) {
3038 		ucode = ucode_list[i];
3039 		psp_print_fw_hdr(psp, ucode);
3040 		ret = psp_execute_ip_fw_load(psp, ucode);
3041 		if (ret)
3042 			return ret;
3043 	}
3044 	return ret;
3045 }
3046 
3047 static int psp_load_non_psp_fw(struct psp_context *psp)
3048 {
3049 	int i, ret;
3050 	struct amdgpu_firmware_info *ucode;
3051 	struct amdgpu_device *adev = psp->adev;
3052 
3053 	if (psp->autoload_supported &&
3054 	    !psp->pmfw_centralized_cstate_management) {
3055 		ret = psp_load_smu_fw(psp);
3056 		if (ret)
3057 			return ret;
3058 	}
3059 
3060 	/* Load P2S table first if it's available */
3061 	psp_load_p2s_table(psp);
3062 
3063 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
3064 		ucode = &adev->firmware.ucode[i];
3065 
3066 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3067 		    !fw_load_skip_check(psp, ucode)) {
3068 			ret = psp_load_smu_fw(psp);
3069 			if (ret)
3070 				return ret;
3071 			continue;
3072 		}
3073 
3074 		if (fw_load_skip_check(psp, ucode))
3075 			continue;
3076 
3077 		if (psp->autoload_supported &&
3078 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3079 			     IP_VERSION(11, 0, 7) ||
3080 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3081 			     IP_VERSION(11, 0, 11) ||
3082 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3083 			     IP_VERSION(11, 0, 12) ||
3084 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3085 			     IP_VERSION(15, 0, 0) ||
3086 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3087 			     IP_VERSION(15, 0, 8)) &&
3088 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
3089 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
3090 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
3091 			/* PSP only receive one SDMA fw for sienna_cichlid,
3092 			 * as all four sdma fw are same
3093 			 */
3094 			continue;
3095 
3096 		psp_print_fw_hdr(psp, ucode);
3097 
3098 		ret = psp_execute_ip_fw_load(psp, ucode);
3099 		if (ret)
3100 			return ret;
3101 
3102 		/* Start rlc autoload after psp received all the gfx firmware */
3103 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
3104 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
3105 			ret = psp_rlc_autoload_start(psp);
3106 			if (ret) {
3107 				dev_err(adev->dev, "Failed to start rlc autoload\n");
3108 				return ret;
3109 			}
3110 		}
3111 	}
3112 
3113 	return 0;
3114 }
3115 
3116 static int psp_load_fw(struct amdgpu_device *adev)
3117 {
3118 	int ret;
3119 	struct psp_context *psp = &adev->psp;
3120 
3121 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3122 		/* should not destroy ring, only stop */
3123 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
3124 	} else {
3125 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
3126 
3127 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
3128 		if (ret) {
3129 			dev_err(adev->dev, "PSP ring init failed!\n");
3130 			goto failed;
3131 		}
3132 	}
3133 
3134 	ret = psp_hw_start(psp);
3135 	if (ret)
3136 		goto failed;
3137 
3138 	ret = psp_load_non_psp_fw(psp);
3139 	if (ret)
3140 		goto failed1;
3141 
3142 	ret = psp_asd_initialize(psp);
3143 	if (ret) {
3144 		dev_err(adev->dev, "PSP load asd failed!\n");
3145 		goto failed1;
3146 	}
3147 
3148 	ret = psp_rl_load(adev);
3149 	if (ret) {
3150 		dev_err(adev->dev, "PSP load RL failed!\n");
3151 		goto failed1;
3152 	}
3153 
3154 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3155 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
3156 			ret = psp_xgmi_initialize(psp, false, true);
3157 			/* Warning the XGMI seesion initialize failure
3158 			 * Instead of stop driver initialization
3159 			 */
3160 			if (ret)
3161 				dev_err(psp->adev->dev,
3162 					"XGMI: Failed to initialize XGMI session\n");
3163 		}
3164 	}
3165 
3166 	if (psp->ta_fw) {
3167 		ret = psp_ras_initialize(psp);
3168 		if (ret)
3169 			dev_err(psp->adev->dev,
3170 				"RAS: Failed to initialize RAS\n");
3171 
3172 		ret = psp_hdcp_initialize(psp);
3173 		if (ret)
3174 			dev_err(psp->adev->dev,
3175 				"HDCP: Failed to initialize HDCP\n");
3176 
3177 		ret = psp_dtm_initialize(psp);
3178 		if (ret)
3179 			dev_err(psp->adev->dev,
3180 				"DTM: Failed to initialize DTM\n");
3181 
3182 		ret = psp_rap_initialize(psp);
3183 		if (ret)
3184 			dev_err(psp->adev->dev,
3185 				"RAP: Failed to initialize RAP\n");
3186 
3187 		ret = psp_securedisplay_initialize(psp);
3188 		if (ret)
3189 			dev_err(psp->adev->dev,
3190 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3191 	}
3192 
3193 	return 0;
3194 
3195 failed1:
3196 	psp_free_shared_bufs(psp);
3197 failed:
3198 	/*
3199 	 * all cleanup jobs (xgmi terminate, ras terminate,
3200 	 * ring destroy, cmd/fence/fw buffers destory,
3201 	 * psp->cmd destory) are delayed to psp_hw_fini
3202 	 */
3203 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3204 	return ret;
3205 }
3206 
3207 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3208 {
3209 	int ret;
3210 	struct amdgpu_device *adev = ip_block->adev;
3211 
3212 	mutex_lock(&adev->firmware.mutex);
3213 
3214 	ret = amdgpu_ucode_init_bo(adev);
3215 	if (ret)
3216 		goto failed;
3217 
3218 	ret = psp_load_fw(adev);
3219 	if (ret) {
3220 		dev_err(adev->dev, "PSP firmware loading failed\n");
3221 		goto failed;
3222 	}
3223 
3224 	mutex_unlock(&adev->firmware.mutex);
3225 	return 0;
3226 
3227 failed:
3228 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3229 	mutex_unlock(&adev->firmware.mutex);
3230 	return -EINVAL;
3231 }
3232 
3233 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3234 {
3235 	struct amdgpu_device *adev = ip_block->adev;
3236 	struct psp_context *psp = &adev->psp;
3237 
3238 	if (psp->ta_fw) {
3239 		psp_ras_terminate(psp);
3240 		psp_securedisplay_terminate(psp);
3241 		psp_rap_terminate(psp);
3242 		psp_dtm_terminate(psp);
3243 		psp_hdcp_terminate(psp);
3244 
3245 		if (adev->gmc.xgmi.num_physical_nodes > 1)
3246 			psp_xgmi_terminate(psp);
3247 	}
3248 
3249 	psp_asd_terminate(psp);
3250 	psp_tmr_terminate(psp);
3251 
3252 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3253 
3254 	return 0;
3255 }
3256 
3257 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3258 {
3259 	int ret = 0;
3260 	struct amdgpu_device *adev = ip_block->adev;
3261 	struct psp_context *psp = &adev->psp;
3262 
3263 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3264 	    psp->xgmi_context.context.initialized) {
3265 		ret = psp_xgmi_terminate(psp);
3266 		if (ret) {
3267 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3268 			goto out;
3269 		}
3270 	}
3271 
3272 	if (psp->ta_fw) {
3273 		ret = psp_ras_terminate(psp);
3274 		if (ret) {
3275 			dev_err(adev->dev, "Failed to terminate ras ta\n");
3276 			goto out;
3277 		}
3278 		ret = psp_hdcp_terminate(psp);
3279 		if (ret) {
3280 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3281 			goto out;
3282 		}
3283 		ret = psp_dtm_terminate(psp);
3284 		if (ret) {
3285 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3286 			goto out;
3287 		}
3288 		ret = psp_rap_terminate(psp);
3289 		if (ret) {
3290 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3291 			goto out;
3292 		}
3293 		ret = psp_securedisplay_terminate(psp);
3294 		if (ret) {
3295 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3296 			goto out;
3297 		}
3298 	}
3299 
3300 	ret = psp_asd_terminate(psp);
3301 	if (ret) {
3302 		dev_err(adev->dev, "Failed to terminate asd\n");
3303 		goto out;
3304 	}
3305 
3306 	ret = psp_tmr_terminate(psp);
3307 	if (ret) {
3308 		dev_err(adev->dev, "Failed to terminate tmr\n");
3309 		goto out;
3310 	}
3311 
3312 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3313 	if (ret)
3314 		dev_err(adev->dev, "PSP ring stop failed\n");
3315 
3316 out:
3317 	return ret;
3318 }
3319 
3320 static int psp_resume(struct amdgpu_ip_block *ip_block)
3321 {
3322 	int ret;
3323 	struct amdgpu_device *adev = ip_block->adev;
3324 	struct psp_context *psp = &adev->psp;
3325 
3326 	dev_info(adev->dev, "PSP is resuming...\n");
3327 
3328 	if (psp->mem_train_ctx.enable_mem_training) {
3329 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3330 		if (ret) {
3331 			dev_err(adev->dev, "Failed to process memory training!\n");
3332 			return ret;
3333 		}
3334 	}
3335 
3336 	mutex_lock(&adev->firmware.mutex);
3337 
3338 	ret = amdgpu_ucode_init_bo(adev);
3339 	if (ret)
3340 		goto failed;
3341 
3342 	ret = psp_hw_start(psp);
3343 	if (ret)
3344 		goto failed;
3345 
3346 	ret = psp_load_non_psp_fw(psp);
3347 	if (ret)
3348 		goto failed;
3349 
3350 	ret = psp_asd_initialize(psp);
3351 	if (ret) {
3352 		dev_err(adev->dev, "PSP load asd failed!\n");
3353 		goto failed;
3354 	}
3355 
3356 	ret = psp_rl_load(adev);
3357 	if (ret) {
3358 		dev_err(adev->dev, "PSP load RL failed!\n");
3359 		goto failed;
3360 	}
3361 
3362 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3363 		ret = psp_xgmi_initialize(psp, false, true);
3364 		/* Warning the XGMI seesion initialize failure
3365 		 * Instead of stop driver initialization
3366 		 */
3367 		if (ret)
3368 			dev_err(psp->adev->dev,
3369 				"XGMI: Failed to initialize XGMI session\n");
3370 	}
3371 
3372 	if (psp->ta_fw) {
3373 		ret = psp_ras_initialize(psp);
3374 		if (ret)
3375 			dev_err(psp->adev->dev,
3376 				"RAS: Failed to initialize RAS\n");
3377 
3378 		ret = psp_hdcp_initialize(psp);
3379 		if (ret)
3380 			dev_err(psp->adev->dev,
3381 				"HDCP: Failed to initialize HDCP\n");
3382 
3383 		ret = psp_dtm_initialize(psp);
3384 		if (ret)
3385 			dev_err(psp->adev->dev,
3386 				"DTM: Failed to initialize DTM\n");
3387 
3388 		ret = psp_rap_initialize(psp);
3389 		if (ret)
3390 			dev_err(psp->adev->dev,
3391 				"RAP: Failed to initialize RAP\n");
3392 
3393 		ret = psp_securedisplay_initialize(psp);
3394 		if (ret)
3395 			dev_err(psp->adev->dev,
3396 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3397 	}
3398 
3399 	mutex_unlock(&adev->firmware.mutex);
3400 
3401 	return 0;
3402 
3403 failed:
3404 	dev_err(adev->dev, "PSP resume failed\n");
3405 	mutex_unlock(&adev->firmware.mutex);
3406 	return ret;
3407 }
3408 
3409 int psp_gpu_reset(struct amdgpu_device *adev)
3410 {
3411 	int ret;
3412 
3413 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3414 		return 0;
3415 
3416 	mutex_lock(&adev->psp.mutex);
3417 	ret = psp_mode1_reset(&adev->psp);
3418 	mutex_unlock(&adev->psp.mutex);
3419 
3420 	return ret;
3421 }
3422 
3423 int psp_rlc_autoload_start(struct psp_context *psp)
3424 {
3425 	int ret;
3426 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3427 
3428 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3429 
3430 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3431 				 psp->fence_buf_mc_addr);
3432 
3433 	release_psp_cmd_buf(psp);
3434 
3435 	return ret;
3436 }
3437 
3438 int psp_ring_cmd_submit(struct psp_context *psp,
3439 			uint64_t cmd_buf_mc_addr,
3440 			uint64_t fence_mc_addr,
3441 			int index)
3442 {
3443 	unsigned int psp_write_ptr_reg = 0;
3444 	struct psp_gfx_rb_frame *write_frame;
3445 	struct psp_ring *ring = &psp->km_ring;
3446 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3447 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3448 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3449 	struct amdgpu_device *adev = psp->adev;
3450 	uint32_t ring_size_dw = ring->ring_size / 4;
3451 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3452 
3453 	/* KM (GPCOM) prepare write pointer */
3454 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3455 
3456 	/* Update KM RB frame pointer to new frame */
3457 	/* write_frame ptr increments by size of rb_frame in bytes */
3458 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3459 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3460 		write_frame = ring_buffer_start;
3461 	else
3462 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3463 	/* Check invalid write_frame ptr address */
3464 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3465 		dev_err(adev->dev,
3466 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3467 			ring_buffer_start, ring_buffer_end, write_frame);
3468 		dev_err(adev->dev,
3469 			"write_frame is pointing to address out of bounds\n");
3470 		return -EINVAL;
3471 	}
3472 
3473 	/* Initialize KM RB frame */
3474 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3475 
3476 	/* Update KM RB frame */
3477 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3478 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3479 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3480 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3481 	write_frame->fence_value = index;
3482 	amdgpu_device_flush_hdp(adev, NULL);
3483 
3484 	/* Update the write Pointer in DWORDs */
3485 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3486 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3487 	return 0;
3488 }
3489 
3490 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3491 {
3492 	struct amdgpu_device *adev = psp->adev;
3493 	const struct psp_firmware_header_v1_0 *asd_hdr;
3494 	int err = 0;
3495 
3496 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3497 				   "amdgpu/%s_asd.bin", chip_name);
3498 	if (err)
3499 		goto out;
3500 
3501 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3502 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3503 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3504 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3505 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3506 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3507 	return 0;
3508 out:
3509 	amdgpu_ucode_release(&adev->psp.asd_fw);
3510 	return err;
3511 }
3512 
3513 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3514 {
3515 	struct amdgpu_device *adev = psp->adev;
3516 	const struct psp_firmware_header_v1_0 *toc_hdr;
3517 	int err = 0;
3518 
3519 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3520 				   "amdgpu/%s_toc.bin", chip_name);
3521 	if (err)
3522 		goto out;
3523 
3524 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3525 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3526 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3527 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3528 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3529 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3530 	return 0;
3531 out:
3532 	amdgpu_ucode_release(&adev->psp.toc_fw);
3533 	return err;
3534 }
3535 
3536 static int parse_sos_bin_descriptor(struct psp_context *psp,
3537 				   const struct psp_fw_bin_desc *desc,
3538 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3539 {
3540 	uint8_t *ucode_start_addr  = NULL;
3541 
3542 	if (!psp || !desc || !sos_hdr)
3543 		return -EINVAL;
3544 
3545 	ucode_start_addr  = (uint8_t *)sos_hdr +
3546 			    le32_to_cpu(desc->offset_bytes) +
3547 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3548 
3549 	switch (desc->fw_type) {
3550 	case PSP_FW_TYPE_PSP_SOS:
3551 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3552 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3553 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3554 		psp->sos.start_addr	   = ucode_start_addr;
3555 		break;
3556 	case PSP_FW_TYPE_PSP_SYS_DRV:
3557 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3558 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3559 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3560 		psp->sys.start_addr        = ucode_start_addr;
3561 		break;
3562 	case PSP_FW_TYPE_PSP_KDB:
3563 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3564 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3565 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3566 		psp->kdb.start_addr        = ucode_start_addr;
3567 		break;
3568 	case PSP_FW_TYPE_PSP_TOC:
3569 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3570 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3571 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3572 		psp->toc.start_addr        = ucode_start_addr;
3573 		break;
3574 	case PSP_FW_TYPE_PSP_SPL:
3575 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3576 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3577 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3578 		psp->spl.start_addr        = ucode_start_addr;
3579 		break;
3580 	case PSP_FW_TYPE_PSP_RL:
3581 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3582 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3583 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3584 		psp->rl.start_addr         = ucode_start_addr;
3585 		break;
3586 	case PSP_FW_TYPE_PSP_SOC_DRV:
3587 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3588 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3589 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3590 		psp->soc_drv.start_addr         = ucode_start_addr;
3591 		break;
3592 	case PSP_FW_TYPE_PSP_INTF_DRV:
3593 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3594 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3595 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3596 		psp->intf_drv.start_addr        = ucode_start_addr;
3597 		break;
3598 	case PSP_FW_TYPE_PSP_DBG_DRV:
3599 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3600 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3601 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3602 		psp->dbg_drv.start_addr         = ucode_start_addr;
3603 		break;
3604 	case PSP_FW_TYPE_PSP_RAS_DRV:
3605 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3606 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3607 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3608 		psp->ras_drv.start_addr         = ucode_start_addr;
3609 		break;
3610 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3611 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3612 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3613 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3614 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3615 		break;
3616 	case PSP_FW_TYPE_PSP_SPDM_DRV:
3617 		psp->spdm_drv.fw_version	= le32_to_cpu(desc->fw_version);
3618 		psp->spdm_drv.feature_version	= le32_to_cpu(desc->fw_version);
3619 		psp->spdm_drv.size_bytes	= le32_to_cpu(desc->size_bytes);
3620 		psp->spdm_drv.start_addr	= ucode_start_addr;
3621 		break;
3622 	default:
3623 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3624 		break;
3625 	}
3626 
3627 	return 0;
3628 }
3629 
3630 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3631 {
3632 	const struct psp_firmware_header_v1_0 *sos_hdr;
3633 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3634 	uint8_t *ucode_array_start_addr;
3635 
3636 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3637 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3638 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3639 
3640 	if (adev->gmc.xgmi.connected_to_cpu ||
3641 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3642 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3643 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3644 
3645 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3646 		adev->psp.sys.start_addr = ucode_array_start_addr;
3647 
3648 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3649 		adev->psp.sos.start_addr = ucode_array_start_addr +
3650 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3651 	} else {
3652 		/* Load alternate PSP SOS FW */
3653 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3654 
3655 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3656 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3657 
3658 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3659 		adev->psp.sys.start_addr = ucode_array_start_addr +
3660 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3661 
3662 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3663 		adev->psp.sos.start_addr = ucode_array_start_addr +
3664 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3665 	}
3666 
3667 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3668 		dev_warn(adev->dev, "PSP SOS FW not available");
3669 		return -EINVAL;
3670 	}
3671 
3672 	return 0;
3673 }
3674 
3675 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3676 {
3677 	struct amdgpu_device *adev = psp->adev;
3678 	const struct psp_firmware_header_v1_0 *sos_hdr;
3679 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3680 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3681 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3682 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3683 	const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3684 	int fw_index, fw_bin_count, start_index = 0;
3685 	const struct psp_fw_bin_desc *fw_bin;
3686 	uint8_t *ucode_array_start_addr;
3687 	int err = 0;
3688 
3689 	if (amdgpu_is_kicker_fw(adev))
3690 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3691 					   "amdgpu/%s_sos_kicker.bin", chip_name);
3692 	else
3693 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3694 					   "amdgpu/%s_sos.bin", chip_name);
3695 	if (err)
3696 		goto out;
3697 
3698 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3699 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3700 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3701 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3702 
3703 	switch (sos_hdr->header.header_version_major) {
3704 	case 1:
3705 		err = psp_init_sos_base_fw(adev);
3706 		if (err)
3707 			goto out;
3708 
3709 		if (sos_hdr->header.header_version_minor == 1) {
3710 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3711 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3712 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3713 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3714 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3715 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3716 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3717 		}
3718 		if (sos_hdr->header.header_version_minor == 2) {
3719 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3720 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3721 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3722 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3723 		}
3724 		if (sos_hdr->header.header_version_minor == 3) {
3725 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3726 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3727 			adev->psp.toc.start_addr = ucode_array_start_addr +
3728 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3729 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3730 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3731 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3732 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3733 			adev->psp.spl.start_addr = ucode_array_start_addr +
3734 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3735 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3736 			adev->psp.rl.start_addr = ucode_array_start_addr +
3737 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3738 		}
3739 		break;
3740 	case 2:
3741 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3742 
3743 		fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3744 
3745 		if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3746 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3747 			err = -EINVAL;
3748 			goto out;
3749 		}
3750 
3751 		if (sos_hdr_v2_0->header.header_version_minor == 1) {
3752 			sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3753 
3754 			fw_bin = sos_hdr_v2_1->psp_fw_bin;
3755 
3756 			if (psp_is_aux_sos_load_required(psp))
3757 				start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3758 			else
3759 				fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3760 
3761 		} else {
3762 			fw_bin = sos_hdr_v2_0->psp_fw_bin;
3763 		}
3764 
3765 		for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3766 			err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3767 						       sos_hdr_v2_0);
3768 			if (err)
3769 				goto out;
3770 		}
3771 		break;
3772 	default:
3773 		dev_err(adev->dev,
3774 			"unsupported psp sos firmware\n");
3775 		err = -EINVAL;
3776 		goto out;
3777 	}
3778 
3779 	return 0;
3780 out:
3781 	amdgpu_ucode_release(&adev->psp.sos_fw);
3782 
3783 	return err;
3784 }
3785 
3786 static bool is_ta_fw_applicable(struct psp_context *psp,
3787 			     const struct psp_fw_bin_desc *desc)
3788 {
3789 	struct amdgpu_device *adev = psp->adev;
3790 	uint32_t fw_version;
3791 
3792 	switch (desc->fw_type) {
3793 	case TA_FW_TYPE_PSP_XGMI:
3794 	case TA_FW_TYPE_PSP_XGMI_AUX:
3795 		/* for now, AUX TA only exists on 13.0.6 ta bin,
3796 		 * from v20.00.0x.14
3797 		 */
3798 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3799 		    IP_VERSION(13, 0, 6)) {
3800 			fw_version = le32_to_cpu(desc->fw_version);
3801 
3802 			if (adev->flags & AMD_IS_APU &&
3803 			    (fw_version & 0xff) >= 0x14)
3804 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3805 			else
3806 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3807 		}
3808 		break;
3809 	default:
3810 		break;
3811 	}
3812 
3813 	return true;
3814 }
3815 
3816 static int parse_ta_bin_descriptor(struct psp_context *psp,
3817 				   const struct psp_fw_bin_desc *desc,
3818 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3819 {
3820 	uint8_t *ucode_start_addr  = NULL;
3821 
3822 	if (!psp || !desc || !ta_hdr)
3823 		return -EINVAL;
3824 
3825 	if (!is_ta_fw_applicable(psp, desc))
3826 		return 0;
3827 
3828 	ucode_start_addr  = (uint8_t *)ta_hdr +
3829 			    le32_to_cpu(desc->offset_bytes) +
3830 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3831 
3832 	switch (desc->fw_type) {
3833 	case TA_FW_TYPE_PSP_ASD:
3834 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3835 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3836 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3837 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3838 		break;
3839 	case TA_FW_TYPE_PSP_XGMI:
3840 	case TA_FW_TYPE_PSP_XGMI_AUX:
3841 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3842 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3843 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3844 		break;
3845 	case TA_FW_TYPE_PSP_RAS:
3846 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3847 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3848 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3849 		break;
3850 	case TA_FW_TYPE_PSP_HDCP:
3851 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3852 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3853 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3854 		break;
3855 	case TA_FW_TYPE_PSP_DTM:
3856 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3857 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3858 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3859 		break;
3860 	case TA_FW_TYPE_PSP_RAP:
3861 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3862 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3863 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3864 		break;
3865 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3866 		psp->securedisplay_context.context.bin_desc.fw_version =
3867 			le32_to_cpu(desc->fw_version);
3868 		psp->securedisplay_context.context.bin_desc.size_bytes =
3869 			le32_to_cpu(desc->size_bytes);
3870 		psp->securedisplay_context.context.bin_desc.start_addr =
3871 			ucode_start_addr;
3872 		break;
3873 	default:
3874 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3875 		break;
3876 	}
3877 
3878 	return 0;
3879 }
3880 
3881 static int parse_ta_v1_microcode(struct psp_context *psp)
3882 {
3883 	const struct ta_firmware_header_v1_0 *ta_hdr;
3884 	struct amdgpu_device *adev = psp->adev;
3885 
3886 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3887 
3888 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3889 		return -EINVAL;
3890 
3891 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3892 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3893 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3894 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3895 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3896 		(uint8_t *)ta_hdr +
3897 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3898 
3899 	adev->psp.ras_context.context.bin_desc.fw_version =
3900 		le32_to_cpu(ta_hdr->ras.fw_version);
3901 	adev->psp.ras_context.context.bin_desc.size_bytes =
3902 		le32_to_cpu(ta_hdr->ras.size_bytes);
3903 	adev->psp.ras_context.context.bin_desc.start_addr =
3904 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3905 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3906 
3907 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3908 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3909 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3910 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3911 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3912 		(uint8_t *)ta_hdr +
3913 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3914 
3915 	adev->psp.dtm_context.context.bin_desc.fw_version =
3916 		le32_to_cpu(ta_hdr->dtm.fw_version);
3917 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3918 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3919 	adev->psp.dtm_context.context.bin_desc.start_addr =
3920 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3921 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3922 
3923 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3924 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3925 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3926 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3927 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3928 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3929 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3930 
3931 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3932 
3933 	return 0;
3934 }
3935 
3936 static int parse_ta_v2_microcode(struct psp_context *psp)
3937 {
3938 	const struct ta_firmware_header_v2_0 *ta_hdr;
3939 	struct amdgpu_device *adev = psp->adev;
3940 	int err = 0;
3941 	int ta_index = 0;
3942 
3943 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3944 
3945 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3946 		return -EINVAL;
3947 
3948 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3949 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3950 		return -EINVAL;
3951 	}
3952 
3953 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3954 		err = parse_ta_bin_descriptor(psp,
3955 					      &ta_hdr->ta_fw_bin[ta_index],
3956 					      ta_hdr);
3957 		if (err)
3958 			return err;
3959 	}
3960 
3961 	return 0;
3962 }
3963 
3964 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3965 {
3966 	const struct common_firmware_header *hdr;
3967 	struct amdgpu_device *adev = psp->adev;
3968 	int err;
3969 
3970 	if (amdgpu_is_kicker_fw(adev))
3971 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3972 					   "amdgpu/%s_ta_kicker.bin", chip_name);
3973 	else
3974 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3975 					   "amdgpu/%s_ta.bin", chip_name);
3976 	if (err)
3977 		return err;
3978 
3979 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3980 	switch (le16_to_cpu(hdr->header_version_major)) {
3981 	case 1:
3982 		err = parse_ta_v1_microcode(psp);
3983 		break;
3984 	case 2:
3985 		err = parse_ta_v2_microcode(psp);
3986 		break;
3987 	default:
3988 		dev_err(adev->dev, "unsupported TA header version\n");
3989 		err = -EINVAL;
3990 	}
3991 
3992 	if (err)
3993 		amdgpu_ucode_release(&adev->psp.ta_fw);
3994 
3995 	return err;
3996 }
3997 
3998 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3999 {
4000 	struct amdgpu_device *adev = psp->adev;
4001 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
4002 	struct amdgpu_firmware_info *info = NULL;
4003 	int err = 0;
4004 
4005 	if (!amdgpu_sriov_vf(adev)) {
4006 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
4007 		return -EINVAL;
4008 	}
4009 
4010 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
4011 				   "amdgpu/%s_cap.bin", chip_name);
4012 	if (err) {
4013 		if (err == -ENODEV) {
4014 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
4015 			err = 0;
4016 		} else {
4017 			dev_err(adev->dev, "fail to initialize cap microcode\n");
4018 		}
4019 		goto out;
4020 	}
4021 
4022 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
4023 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
4024 	info->fw = adev->psp.cap_fw;
4025 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
4026 		adev->psp.cap_fw->data;
4027 	adev->firmware.fw_size += ALIGN(
4028 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
4029 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
4030 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
4031 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
4032 
4033 	return 0;
4034 
4035 out:
4036 	amdgpu_ucode_release(&adev->psp.cap_fw);
4037 	return err;
4038 }
4039 
4040 int psp_config_sq_perfmon(struct psp_context *psp,
4041 		uint32_t xcp_id, bool core_override_enable,
4042 		bool reg_override_enable, bool perfmon_override_enable)
4043 {
4044 	int ret;
4045 
4046 	if (amdgpu_sriov_vf(psp->adev))
4047 		return 0;
4048 
4049 	if (xcp_id > MAX_XCP) {
4050 		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
4051 		return -EINVAL;
4052 	}
4053 
4054 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
4055 		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
4056 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
4057 		return -EINVAL;
4058 	}
4059 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
4060 
4061 	cmd->cmd_id	=	GFX_CMD_ID_CONFIG_SQ_PERFMON;
4062 	cmd->cmd.config_sq_perfmon.gfx_xcp_mask	=	BIT_MASK(xcp_id);
4063 	cmd->cmd.config_sq_perfmon.core_override	=	core_override_enable;
4064 	cmd->cmd.config_sq_perfmon.reg_override	=	reg_override_enable;
4065 	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
4066 
4067 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
4068 	if (ret)
4069 		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
4070 			xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
4071 
4072 	release_psp_cmd_buf(psp);
4073 	return ret;
4074 }
4075 
4076 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
4077 					enum amd_clockgating_state state)
4078 {
4079 	return 0;
4080 }
4081 
4082 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
4083 				     enum amd_powergating_state state)
4084 {
4085 	return 0;
4086 }
4087 
4088 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
4089 					 struct device_attribute *attr,
4090 					 char *buf)
4091 {
4092 	struct drm_device *ddev = dev_get_drvdata(dev);
4093 	struct amdgpu_device *adev = drm_to_adev(ddev);
4094 	struct amdgpu_ip_block *ip_block;
4095 	uint32_t fw_ver;
4096 	int ret;
4097 
4098 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4099 	if (!ip_block || !ip_block->status.late_initialized) {
4100 		dev_info(adev->dev, "PSP block is not ready yet\n.");
4101 		return -EBUSY;
4102 	}
4103 
4104 	mutex_lock(&adev->psp.mutex);
4105 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
4106 	mutex_unlock(&adev->psp.mutex);
4107 
4108 	if (ret) {
4109 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
4110 		return ret;
4111 	}
4112 
4113 	return sysfs_emit(buf, "%x\n", fw_ver);
4114 }
4115 
4116 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
4117 						       struct device_attribute *attr,
4118 						       const char *buf,
4119 						       size_t count)
4120 {
4121 	struct drm_device *ddev = dev_get_drvdata(dev);
4122 	struct amdgpu_device *adev = drm_to_adev(ddev);
4123 	int ret, idx;
4124 	const struct firmware *usbc_pd_fw;
4125 	struct amdgpu_bo *fw_buf_bo = NULL;
4126 	uint64_t fw_pri_mc_addr;
4127 	void *fw_pri_cpu_addr;
4128 	struct amdgpu_ip_block *ip_block;
4129 
4130 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4131 	if (!ip_block || !ip_block->status.late_initialized) {
4132 		dev_err(adev->dev, "PSP block is not ready yet.");
4133 		return -EBUSY;
4134 	}
4135 
4136 	if (!drm_dev_enter(ddev, &idx))
4137 		return -ENODEV;
4138 
4139 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
4140 				   "amdgpu/%s", buf);
4141 	if (ret)
4142 		goto fail;
4143 
4144 	/* LFB address which is aligned to 1MB boundary per PSP request */
4145 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
4146 				      AMDGPU_GEM_DOMAIN_VRAM |
4147 				      AMDGPU_GEM_DOMAIN_GTT,
4148 				      &fw_buf_bo, &fw_pri_mc_addr,
4149 				      &fw_pri_cpu_addr);
4150 	if (ret)
4151 		goto rel_buf;
4152 
4153 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
4154 
4155 	mutex_lock(&adev->psp.mutex);
4156 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
4157 	mutex_unlock(&adev->psp.mutex);
4158 
4159 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4160 
4161 rel_buf:
4162 	amdgpu_ucode_release(&usbc_pd_fw);
4163 fail:
4164 	if (ret) {
4165 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
4166 		count = ret;
4167 	}
4168 
4169 	drm_dev_exit(idx);
4170 	return count;
4171 }
4172 
4173 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
4174 {
4175 	int idx;
4176 
4177 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
4178 		return;
4179 
4180 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
4181 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
4182 
4183 	drm_dev_exit(idx);
4184 }
4185 
4186 /**
4187  * DOC: usbc_pd_fw
4188  * Reading from this file will retrieve the USB-C PD firmware version. Writing to
4189  * this file will trigger the update process.
4190  */
4191 static DEVICE_ATTR(usbc_pd_fw, 0644,
4192 		   psp_usbc_pd_fw_sysfs_read,
4193 		   psp_usbc_pd_fw_sysfs_write);
4194 
4195 int is_psp_fw_valid(struct psp_bin_desc bin)
4196 {
4197 	return bin.size_bytes;
4198 }
4199 
4200 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4201 					const struct bin_attribute *bin_attr,
4202 					char *buffer, loff_t pos, size_t count)
4203 {
4204 	struct device *dev = kobj_to_dev(kobj);
4205 	struct drm_device *ddev = dev_get_drvdata(dev);
4206 	struct amdgpu_device *adev = drm_to_adev(ddev);
4207 
4208 	adev->psp.vbflash_done = false;
4209 
4210 	/* Safeguard against memory drain */
4211 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
4212 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4213 		kvfree(adev->psp.vbflash_tmp_buf);
4214 		adev->psp.vbflash_tmp_buf = NULL;
4215 		adev->psp.vbflash_image_size = 0;
4216 		return -ENOMEM;
4217 	}
4218 
4219 	/* TODO Just allocate max for now and optimize to realloc later if needed */
4220 	if (!adev->psp.vbflash_tmp_buf) {
4221 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4222 		if (!adev->psp.vbflash_tmp_buf)
4223 			return -ENOMEM;
4224 	}
4225 
4226 	mutex_lock(&adev->psp.mutex);
4227 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4228 	adev->psp.vbflash_image_size += count;
4229 	mutex_unlock(&adev->psp.mutex);
4230 
4231 	dev_dbg(adev->dev, "IFWI staged for update\n");
4232 
4233 	return count;
4234 }
4235 
4236 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4237 				       const struct bin_attribute *bin_attr, char *buffer,
4238 				       loff_t pos, size_t count)
4239 {
4240 	struct device *dev = kobj_to_dev(kobj);
4241 	struct drm_device *ddev = dev_get_drvdata(dev);
4242 	struct amdgpu_device *adev = drm_to_adev(ddev);
4243 	struct amdgpu_bo *fw_buf_bo = NULL;
4244 	uint64_t fw_pri_mc_addr;
4245 	void *fw_pri_cpu_addr;
4246 	int ret;
4247 
4248 	if (adev->psp.vbflash_image_size == 0)
4249 		return -EINVAL;
4250 
4251 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4252 
4253 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4254 					AMDGPU_GPU_PAGE_SIZE,
4255 					AMDGPU_GEM_DOMAIN_VRAM,
4256 					&fw_buf_bo,
4257 					&fw_pri_mc_addr,
4258 					&fw_pri_cpu_addr);
4259 	if (ret)
4260 		goto rel_buf;
4261 
4262 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4263 
4264 	mutex_lock(&adev->psp.mutex);
4265 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4266 	mutex_unlock(&adev->psp.mutex);
4267 
4268 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4269 
4270 rel_buf:
4271 	kvfree(adev->psp.vbflash_tmp_buf);
4272 	adev->psp.vbflash_tmp_buf = NULL;
4273 	adev->psp.vbflash_image_size = 0;
4274 
4275 	if (ret) {
4276 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4277 		return ret;
4278 	}
4279 
4280 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4281 	return 0;
4282 }
4283 
4284 /**
4285  * DOC: psp_vbflash
4286  * Writing to this file will stage an IFWI for update. Reading from this file
4287  * will trigger the update process.
4288  */
4289 static const struct bin_attribute psp_vbflash_bin_attr = {
4290 	.attr = {.name = "psp_vbflash", .mode = 0660},
4291 	.size = 0,
4292 	.write = amdgpu_psp_vbflash_write,
4293 	.read = amdgpu_psp_vbflash_read,
4294 };
4295 
4296 /**
4297  * DOC: psp_vbflash_status
4298  * The status of the flash process.
4299  * 0: IFWI flash not complete.
4300  * 1: IFWI flash complete.
4301  */
4302 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4303 					 struct device_attribute *attr,
4304 					 char *buf)
4305 {
4306 	struct drm_device *ddev = dev_get_drvdata(dev);
4307 	struct amdgpu_device *adev = drm_to_adev(ddev);
4308 	uint32_t vbflash_status;
4309 
4310 	vbflash_status = psp_vbflash_status(&adev->psp);
4311 	if (!adev->psp.vbflash_done)
4312 		vbflash_status = 0;
4313 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4314 		vbflash_status = 1;
4315 
4316 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
4317 }
4318 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4319 
4320 static const struct bin_attribute *const bin_flash_attrs[] = {
4321 	&psp_vbflash_bin_attr,
4322 	NULL
4323 };
4324 
4325 static struct attribute *flash_attrs[] = {
4326 	&dev_attr_psp_vbflash_status.attr,
4327 	&dev_attr_usbc_pd_fw.attr,
4328 	NULL
4329 };
4330 
4331 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4332 {
4333 	struct device *dev = kobj_to_dev(kobj);
4334 	struct drm_device *ddev = dev_get_drvdata(dev);
4335 	struct amdgpu_device *adev = drm_to_adev(ddev);
4336 
4337 	if (attr == &dev_attr_usbc_pd_fw.attr)
4338 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
4339 
4340 	return adev->psp.sup_ifwi_up ? 0440 : 0;
4341 }
4342 
4343 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4344 						const struct bin_attribute *attr,
4345 						int idx)
4346 {
4347 	struct device *dev = kobj_to_dev(kobj);
4348 	struct drm_device *ddev = dev_get_drvdata(dev);
4349 	struct amdgpu_device *adev = drm_to_adev(ddev);
4350 
4351 	return adev->psp.sup_ifwi_up ? 0660 : 0;
4352 }
4353 
4354 const struct attribute_group amdgpu_flash_attr_group = {
4355 	.attrs = flash_attrs,
4356 	.bin_attrs = bin_flash_attrs,
4357 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4358 	.is_visible = amdgpu_flash_attr_is_visible,
4359 };
4360 
4361 #if defined(CONFIG_DEBUG_FS)
4362 static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
4363 {
4364 	struct amdgpu_device *adev = filp->f_inode->i_private;
4365 	struct spirom_bo *bo_triplet;
4366 	int ret;
4367 
4368 	/* serialize the open() file calling */
4369 	if (!mutex_trylock(&adev->psp.mutex))
4370 		return -EBUSY;
4371 
4372 	/*
4373 	 * make sure only one userpace process is alive for dumping so that
4374 	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE * 2 is consumed.
4375 	 * let's say the case where one process try opening the file while
4376 	 * another one has proceeded to read or release. In this way, eliminate
4377 	 * the use of mutex for read() or release() callback as well.
4378 	 */
4379 	if (adev->psp.spirom_dump_trip) {
4380 		mutex_unlock(&adev->psp.mutex);
4381 		return -EBUSY;
4382 	}
4383 
4384 	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
4385 	if (!bo_triplet) {
4386 		mutex_unlock(&adev->psp.mutex);
4387 		return -ENOMEM;
4388 	}
4389 
4390 	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
4391 				      AMDGPU_GPU_PAGE_SIZE,
4392 				      AMDGPU_GEM_DOMAIN_GTT,
4393 				      &bo_triplet->bo,
4394 				      &bo_triplet->mc_addr,
4395 				      &bo_triplet->cpu_addr);
4396 	if (ret)
4397 		goto rel_trip;
4398 
4399 	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
4400 	if (ret)
4401 		goto rel_bo;
4402 
4403 	adev->psp.spirom_dump_trip = bo_triplet;
4404 	mutex_unlock(&adev->psp.mutex);
4405 	return 0;
4406 rel_bo:
4407 	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4408 			      &bo_triplet->cpu_addr);
4409 rel_trip:
4410 	kfree(bo_triplet);
4411 	mutex_unlock(&adev->psp.mutex);
4412 	dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
4413 	return ret;
4414 }
4415 
4416 static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
4417 					    loff_t *pos)
4418 {
4419 	struct amdgpu_device *adev = filp->f_inode->i_private;
4420 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4421 
4422 	if (!bo_triplet)
4423 		return -EINVAL;
4424 
4425 	return simple_read_from_buffer(buf,
4426 				       size,
4427 				       pos, bo_triplet->cpu_addr,
4428 				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4429 }
4430 
4431 static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
4432 {
4433 	struct amdgpu_device *adev = filp->f_inode->i_private;
4434 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4435 
4436 	if (bo_triplet) {
4437 		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4438 				      &bo_triplet->cpu_addr);
4439 		kfree(bo_triplet);
4440 	}
4441 
4442 	adev->psp.spirom_dump_trip = NULL;
4443 	return 0;
4444 }
4445 
4446 static const struct file_operations psp_dump_spirom_debugfs_ops = {
4447 	.owner = THIS_MODULE,
4448 	.open = psp_read_spirom_debugfs_open,
4449 	.read = psp_read_spirom_debugfs_read,
4450 	.release = psp_read_spirom_debugfs_release,
4451 	.llseek = default_llseek,
4452 };
4453 #endif
4454 
4455 void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
4456 {
4457 #if defined(CONFIG_DEBUG_FS)
4458 	struct drm_minor *minor = adev_to_drm(adev)->primary;
4459 
4460 	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
4461 				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4462 #endif
4463 }
4464 
4465 const struct amd_ip_funcs psp_ip_funcs = {
4466 	.name = "psp",
4467 	.early_init = psp_early_init,
4468 	.sw_init = psp_sw_init,
4469 	.sw_fini = psp_sw_fini,
4470 	.hw_init = psp_hw_init,
4471 	.hw_fini = psp_hw_fini,
4472 	.suspend = psp_suspend,
4473 	.resume = psp_resume,
4474 	.set_clockgating_state = psp_set_clockgating_state,
4475 	.set_powergating_state = psp_set_powergating_state,
4476 };
4477 
4478 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4479 	.type = AMD_IP_BLOCK_TYPE_PSP,
4480 	.major = 3,
4481 	.minor = 1,
4482 	.rev = 0,
4483 	.funcs = &psp_ip_funcs,
4484 };
4485 
4486 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4487 	.type = AMD_IP_BLOCK_TYPE_PSP,
4488 	.major = 10,
4489 	.minor = 0,
4490 	.rev = 0,
4491 	.funcs = &psp_ip_funcs,
4492 };
4493 
4494 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4495 	.type = AMD_IP_BLOCK_TYPE_PSP,
4496 	.major = 11,
4497 	.minor = 0,
4498 	.rev = 0,
4499 	.funcs = &psp_ip_funcs,
4500 };
4501 
4502 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4503 	.type = AMD_IP_BLOCK_TYPE_PSP,
4504 	.major = 11,
4505 	.minor = 0,
4506 	.rev = 8,
4507 	.funcs = &psp_ip_funcs,
4508 };
4509 
4510 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4511 	.type = AMD_IP_BLOCK_TYPE_PSP,
4512 	.major = 12,
4513 	.minor = 0,
4514 	.rev = 0,
4515 	.funcs = &psp_ip_funcs,
4516 };
4517 
4518 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4519 	.type = AMD_IP_BLOCK_TYPE_PSP,
4520 	.major = 13,
4521 	.minor = 0,
4522 	.rev = 0,
4523 	.funcs = &psp_ip_funcs,
4524 };
4525 
4526 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4527 	.type = AMD_IP_BLOCK_TYPE_PSP,
4528 	.major = 13,
4529 	.minor = 0,
4530 	.rev = 4,
4531 	.funcs = &psp_ip_funcs,
4532 };
4533 
4534 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4535 	.type = AMD_IP_BLOCK_TYPE_PSP,
4536 	.major = 14,
4537 	.minor = 0,
4538 	.rev = 0,
4539 	.funcs = &psp_ip_funcs,
4540 };
4541 
4542 const struct amdgpu_ip_block_version psp_v15_0_ip_block = {
4543 	.type = AMD_IP_BLOCK_TYPE_PSP,
4544 	.major = 15,
4545 	.minor = 0,
4546 	.rev = 0,
4547 	.funcs = &psp_ip_funcs,
4548 };
4549 
4550 const struct amdgpu_ip_block_version psp_v15_0_8_ip_block = {
4551 	.type = AMD_IP_BLOCK_TYPE_PSP,
4552 	.major = 15,
4553 	.minor = 0,
4554 	.rev = 8,
4555 	.funcs = &psp_ip_funcs,
4556 };
4557