xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision f94877038770073b465eece8636e221653d2beae)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46 
47 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)
48 
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52 
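/* Set up the kernel-mode PSP ring descriptor and allocate a 4K ring buffer in VRAM (or GTT) */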
53 static int psp_ring_init(struct psp_context *psp,
54 			 enum psp_ring_type ring_type)
55 {
56 	int ret = 0;
57 	struct psp_ring *ring;
58 	struct amdgpu_device *adev = psp->adev;
59 
60 	ring = &psp->km_ring;
61 
62 	ring->ring_type = ring_type;
63 
64 	/* allocate a 4K page of local frame buffer memory for the ring */
65 	ring->ring_size = 0x1000;
66 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 				      AMDGPU_GEM_DOMAIN_VRAM |
68 				      AMDGPU_GEM_DOMAIN_GTT,
69 				      &adev->firmware.rbuf,
70 				      &ring->ring_mem_mc_addr,
71 				      (void **)&ring->ring_mem);
72 	if (ret) {
73 		ring->ring_size = 0;
74 		return ret;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * Due to DF Cstate management being centralized in PMFW, the firmware
82  * loading sequence is updated as below:
83  *   - Load KDB
84  *   - Load SYS_DRV
85  *   - Load tOS
86  *   - Load PMFW
87  *   - Setup TMR
88  *   - Load other non-psp fw
89  *   - Load ASD
90  *   - Load XGMI/RAS/HDCP/DTM TA if any
91  *
92  * This new sequence is required for
93  *   - Arcturus and onwards
94  */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 	struct amdgpu_device *adev = psp->adev;
98 
99 	if (amdgpu_sriov_vf(adev)) {
100 		psp->pmfw_centralized_cstate_management = false;
101 		return;
102 	}
103 
104 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 	case IP_VERSION(11, 0, 0):
106 	case IP_VERSION(11, 0, 4):
107 	case IP_VERSION(11, 0, 5):
108 	case IP_VERSION(11, 0, 7):
109 	case IP_VERSION(11, 0, 9):
110 	case IP_VERSION(11, 0, 11):
111 	case IP_VERSION(11, 0, 12):
112 	case IP_VERSION(11, 0, 13):
113 	case IP_VERSION(13, 0, 0):
114 	case IP_VERSION(13, 0, 2):
115 	case IP_VERSION(13, 0, 7):
116 		psp->pmfw_centralized_cstate_management = true;
117 		break;
118 	default:
119 		psp->pmfw_centralized_cstate_management = false;
120 		break;
121 	}
122 }
123 
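/* Under SRIOV, pick the autoload ucode id and request the CAP/TA microcode for the detected MP0 IP version */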
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 	struct amdgpu_device *adev = psp->adev;
127 	char ucode_prefix[30];
128 	int ret = 0;
129 
130 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131 
132 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 	case IP_VERSION(9, 0, 0):
134 	case IP_VERSION(11, 0, 7):
135 	case IP_VERSION(11, 0, 9):
136 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 		break;
139 	case IP_VERSION(13, 0, 2):
140 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 		ret = psp_init_cap_microcode(psp, ucode_prefix);
142 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 		break;
144 	case IP_VERSION(13, 0, 0):
145 		adev->virt.autoload_ucode_id = 0;
146 		break;
147 	case IP_VERSION(13, 0, 6):
148 	case IP_VERSION(13, 0, 14):
149 		ret = psp_init_cap_microcode(psp, ucode_prefix);
150 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 		break;
152 	case IP_VERSION(13, 0, 10):
153 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 		ret = psp_init_cap_microcode(psp, ucode_prefix);
155 		break;
156 	case IP_VERSION(13, 0, 12):
157 		ret = psp_init_ta_microcode(psp, ucode_prefix);
158 		break;
159 	default:
160 		return -EINVAL;
161 	}
162 	return ret;
163 }
164 
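/* Bind the per-ASIC PSP callbacks, set the autoload/boot-time-TMR capabilities and request the PSP microcode */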
165 static int psp_early_init(struct amdgpu_ip_block *ip_block)
166 {
167 	struct amdgpu_device *adev = ip_block->adev;
168 	struct psp_context *psp = &adev->psp;
169 
170 	psp->autoload_supported = true;
171 	psp->boot_time_tmr = true;
172 
173 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
174 	case IP_VERSION(9, 0, 0):
175 		psp_v3_1_set_psp_funcs(psp);
176 		psp->autoload_supported = false;
177 		psp->boot_time_tmr = false;
178 		break;
179 	case IP_VERSION(10, 0, 0):
180 	case IP_VERSION(10, 0, 1):
181 		psp_v10_0_set_psp_funcs(psp);
182 		psp->autoload_supported = false;
183 		psp->boot_time_tmr = false;
184 		break;
185 	case IP_VERSION(11, 0, 2):
186 	case IP_VERSION(11, 0, 4):
187 		psp_v11_0_set_psp_funcs(psp);
188 		psp->autoload_supported = false;
189 		psp->boot_time_tmr = false;
190 		break;
191 	case IP_VERSION(11, 0, 0):
192 	case IP_VERSION(11, 0, 7):
193 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
194 		fallthrough;
195 	case IP_VERSION(11, 0, 5):
196 	case IP_VERSION(11, 0, 9):
197 	case IP_VERSION(11, 0, 11):
198 	case IP_VERSION(11, 5, 0):
199 	case IP_VERSION(11, 5, 2):
200 	case IP_VERSION(11, 0, 12):
201 	case IP_VERSION(11, 0, 13):
202 		psp_v11_0_set_psp_funcs(psp);
203 		psp->boot_time_tmr = false;
204 		break;
205 	case IP_VERSION(11, 0, 3):
206 	case IP_VERSION(12, 0, 1):
207 		psp_v12_0_set_psp_funcs(psp);
208 		psp->autoload_supported = false;
209 		psp->boot_time_tmr = false;
210 		break;
211 	case IP_VERSION(13, 0, 2):
212 		psp->boot_time_tmr = false;
213 		fallthrough;
214 	case IP_VERSION(13, 0, 6):
215 	case IP_VERSION(13, 0, 14):
216 		psp_v13_0_set_psp_funcs(psp);
217 		psp->autoload_supported = false;
218 		break;
219 	case IP_VERSION(13, 0, 12):
220 		psp_v13_0_set_psp_funcs(psp);
221 		psp->autoload_supported = false;
222 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
223 		break;
224 	case IP_VERSION(13, 0, 1):
225 	case IP_VERSION(13, 0, 3):
226 	case IP_VERSION(13, 0, 5):
227 	case IP_VERSION(13, 0, 8):
228 	case IP_VERSION(13, 0, 11):
229 	case IP_VERSION(14, 0, 0):
230 	case IP_VERSION(14, 0, 1):
231 	case IP_VERSION(14, 0, 4):
232 		psp_v13_0_set_psp_funcs(psp);
233 		psp->boot_time_tmr = false;
234 		break;
235 	case IP_VERSION(11, 0, 8):
236 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
237 			psp_v11_0_8_set_psp_funcs(psp);
238 		}
239 		psp->autoload_supported = false;
240 		psp->boot_time_tmr = false;
241 		break;
242 	case IP_VERSION(13, 0, 0):
243 	case IP_VERSION(13, 0, 7):
244 	case IP_VERSION(13, 0, 10):
245 		psp_v13_0_set_psp_funcs(psp);
246 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
247 		psp->boot_time_tmr = false;
248 		break;
249 	case IP_VERSION(13, 0, 4):
250 		psp_v13_0_4_set_psp_funcs(psp);
251 		psp->boot_time_tmr = false;
252 		break;
253 	case IP_VERSION(14, 0, 2):
254 	case IP_VERSION(14, 0, 3):
255 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
256 		psp_v14_0_set_psp_funcs(psp);
257 		break;
258 	case IP_VERSION(14, 0, 5):
259 		psp_v14_0_set_psp_funcs(psp);
260 		psp->boot_time_tmr = false;
261 		break;
262 	default:
263 		return -EINVAL;
264 	}
265 
266 	psp->adev = adev;
267 
268 	adev->psp_timeout = 20000;
269 
270 	psp_check_pmfw_centralized_cstate_management(psp);
271 
272 	if (amdgpu_sriov_vf(adev))
273 		return psp_init_sriov_microcode(psp);
274 	else
275 		return psp_init_microcode(psp);
276 }
277 
278 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
279 {
280 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
281 			      &mem_ctx->shared_buf);
282 	mem_ctx->shared_bo = NULL;
283 }
284 
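/* Free the TMR buffer and every TA shared memory buffer (XGMI, RAS, HDCP, DTM, RAP, securedisplay) */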
285 static void psp_free_shared_bufs(struct psp_context *psp)
286 {
287 	void *tmr_buf;
288 	void **pptr;
289 
290 	/* free TMR memory buffer */
291 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
292 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
293 	psp->tmr_bo = NULL;
294 
295 	/* free xgmi shared memory */
296 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
297 
298 	/* free ras shared memory */
299 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
300 
301 	/* free hdcp shared memory */
302 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
303 
304 	/* free dtm shared memory */
305 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
306 
307 	/* free rap shared memory */
308 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
309 
310 	/* free securedisplay shared memory */
311 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
312 
314 }
315 
316 static void psp_memory_training_fini(struct psp_context *psp)
317 {
318 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
319 
320 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
321 	kfree(ctx->sys_cache);
322 	ctx->sys_cache = NULL;
323 }
324 
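/* Allocate the system memory cache used to hold DRAM training data when two-stage memory training is supported */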
325 static int psp_memory_training_init(struct psp_context *psp)
326 {
327 	int ret;
328 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
329 
330 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
331 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
332 		return 0;
333 	}
334 
335 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
336 	if (ctx->sys_cache == NULL) {
337 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
338 		ret = -ENOMEM;
339 		goto Err_out;
340 	}
341 
342 	dev_dbg(psp->adev->dev,
343 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
344 		ctx->train_data_size,
345 		ctx->p2c_train_data_offset,
346 		ctx->c2p_train_data_offset);
347 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
348 	return 0;
349 
350 Err_out:
351 	psp_memory_training_fini(psp);
352 	return ret;
353 }
354 
355 /*
356  * Helper function to query a psp runtime database entry
357  *
358  * @adev: amdgpu_device pointer
359  * @entry_type: the type of psp runtime database entry
360  * @db_entry: runtime database entry pointer
361  *
362  * Return false if the runtime database doesn't exist or the entry is invalid,
363  * or true if the specific database entry is found and copied to @db_entry
364  */
365 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
366 				     enum psp_runtime_entry_type entry_type,
367 				     void *db_entry)
368 {
369 	uint64_t db_header_pos, db_dir_pos;
370 	struct psp_runtime_data_header db_header = {0};
371 	struct psp_runtime_data_directory db_dir = {0};
372 	bool ret = false;
373 	int i;
374 
375 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
376 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
377 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
378 		return false;
379 
380 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
381 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
382 
383 	/* read runtime db header from vram */
384 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
385 			sizeof(struct psp_runtime_data_header), false);
386 
387 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
388 		/* runtime db doesn't exist, exit */
389 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
390 		return false;
391 	}
392 
393 	/* read runtime database entry from vram */
394 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
395 			sizeof(struct psp_runtime_data_directory), false);
396 
397 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
398 		/* invalid db entry count, exit */
399 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
400 		return false;
401 	}
402 
403 	/* look up for requested entry type */
404 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
405 		if (db_dir.entry_list[i].entry_type == entry_type) {
406 			switch (entry_type) {
407 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
408 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
409 					/* invalid db entry size */
410 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
411 					return false;
412 				}
413 				/* read runtime database entry */
414 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
415 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
416 				ret = true;
417 				break;
418 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
419 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
420 					/* invalid db entry size */
421 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
422 					return false;
423 				}
424 				/* read runtime database entry */
425 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
426 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
427 				ret = true;
428 				break;
429 			default:
430 				ret = false;
431 				break;
432 			}
433 		}
434 	}
435 
436 	return ret;
437 }
438 
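/* Allocate the PSP command structure, read the SCPM and boot-config entries from the
 * runtime database, run cold-boot memory training when enabled, and create the
 * firmware private, fence and command buffers.
 */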
439 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
440 {
441 	struct amdgpu_device *adev = ip_block->adev;
442 	struct psp_context *psp = &adev->psp;
443 	int ret;
444 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
445 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
446 	struct psp_runtime_scpm_entry scpm_entry;
447 
448 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
449 	if (!psp->cmd) {
450 		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
451 		ret = -ENOMEM;
452 	}
453 
454 	adev->psp.xgmi_context.supports_extended_data =
455 		!adev->gmc.xgmi.connected_to_cpu &&
456 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
457 
458 	memset(&scpm_entry, 0, sizeof(scpm_entry));
459 	if ((psp_get_runtime_db_entry(adev,
460 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
461 				&scpm_entry)) &&
462 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
463 		adev->scpm_enabled = true;
464 		adev->scpm_status = scpm_entry.scpm_status;
465 	} else {
466 		adev->scpm_enabled = false;
467 		adev->scpm_status = SCPM_DISABLE;
468 	}
469 
470 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
471 
472 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
473 	if (psp_get_runtime_db_entry(adev,
474 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
475 				&boot_cfg_entry)) {
476 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
477 		if ((psp->boot_cfg_bitmask) &
478 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
479 			/* If psp runtime database exists, then
480 			 * only enable two stage memory training
481 			 * when TWO_STAGE_DRAM_TRAINING bit is set
482 			 * in runtime database
483 			 */
484 			mem_training_ctx->enable_mem_training = true;
485 		}
486 
487 	} else {
488 		/* If psp runtime database doesn't exist or is
489 		 * invalid, force enable two stage memory training
490 		 */
491 		mem_training_ctx->enable_mem_training = true;
492 	}
493 
494 	if (mem_training_ctx->enable_mem_training) {
495 		ret = psp_memory_training_init(psp);
496 		if (ret) {
497 			dev_err(adev->dev, "Failed to initialize memory training!\n");
498 			return ret;
499 		}
500 
501 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
502 		if (ret) {
503 			dev_err(adev->dev, "Failed to process memory training!\n");
504 			return ret;
505 		}
506 	}
507 
508 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
509 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
510 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
511 				      &psp->fw_pri_bo,
512 				      &psp->fw_pri_mc_addr,
513 				      &psp->fw_pri_buf);
514 	if (ret)
515 		return ret;
516 
517 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
518 				      AMDGPU_GEM_DOMAIN_VRAM |
519 				      AMDGPU_GEM_DOMAIN_GTT,
520 				      &psp->fence_buf_bo,
521 				      &psp->fence_buf_mc_addr,
522 				      &psp->fence_buf);
523 	if (ret)
524 		goto failed1;
525 
526 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
527 				      AMDGPU_GEM_DOMAIN_VRAM |
528 				      AMDGPU_GEM_DOMAIN_GTT,
529 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
530 				      (void **)&psp->cmd_buf_mem);
531 	if (ret)
532 		goto failed2;
533 
534 	return 0;
535 
536 failed2:
537 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
538 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
539 failed1:
540 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
541 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
542 	return ret;
543 }
544 
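/* Tear down everything set up in psp_sw_init(): memory training context, firmware images,
 * command structure, shared buffers, ring memory and the fw/fence/cmd buffers.
 */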
545 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
546 {
547 	struct amdgpu_device *adev = ip_block->adev;
548 	struct psp_context *psp = &adev->psp;
549 
550 	psp_memory_training_fini(psp);
551 
552 	amdgpu_ucode_release(&psp->sos_fw);
553 	amdgpu_ucode_release(&psp->asd_fw);
554 	amdgpu_ucode_release(&psp->ta_fw);
555 	amdgpu_ucode_release(&psp->cap_fw);
556 	amdgpu_ucode_release(&psp->toc_fw);
557 
558 	kfree(psp->cmd);
559 	psp->cmd = NULL;
560 
561 	psp_free_shared_bufs(psp);
562 
563 	if (psp->km_ring.ring_mem)
564 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
565 				      &psp->km_ring.ring_mem_mc_addr,
566 				      (void **)&psp->km_ring.ring_mem);
567 
568 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
569 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
570 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
571 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
572 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
573 			      (void **)&psp->cmd_buf_mem);
574 
575 	return 0;
576 }
577 
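/* Poll a register every microsecond until (val & mask) == reg_val, or until the value
 * changes when check_changed is set; returns -ETIME once adev->usec_timeout expires.
 */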
578 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
579 		 uint32_t reg_val, uint32_t mask, bool check_changed)
580 {
581 	uint32_t val;
582 	int i;
583 	struct amdgpu_device *adev = psp->adev;
584 
585 	if (psp->adev->no_hw_access)
586 		return 0;
587 
588 	for (i = 0; i < adev->usec_timeout; i++) {
589 		val = RREG32(reg_index);
590 		if (check_changed) {
591 			if (val != reg_val)
592 				return 0;
593 		} else {
594 			if ((val & mask) == reg_val)
595 				return 0;
596 		}
597 		udelay(1);
598 	}
599 
600 	return -ETIME;
601 }
602 
603 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
604 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
605 {
606 	uint32_t val;
607 	int i;
608 	struct amdgpu_device *adev = psp->adev;
609 
610 	if (psp->adev->no_hw_access)
611 		return 0;
612 
613 	for (i = 0; i < msec_timeout; i++) {
614 		val = RREG32(reg_index);
615 		if ((val & mask) == reg_val)
616 			return 0;
617 		msleep(1);
618 	}
619 
620 	return -ETIME;
621 }
622 
623 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
624 {
625 	switch (cmd_id) {
626 	case GFX_CMD_ID_LOAD_TA:
627 		return "LOAD_TA";
628 	case GFX_CMD_ID_UNLOAD_TA:
629 		return "UNLOAD_TA";
630 	case GFX_CMD_ID_INVOKE_CMD:
631 		return "INVOKE_CMD";
632 	case GFX_CMD_ID_LOAD_ASD:
633 		return "LOAD_ASD";
634 	case GFX_CMD_ID_SETUP_TMR:
635 		return "SETUP_TMR";
636 	case GFX_CMD_ID_LOAD_IP_FW:
637 		return "LOAD_IP_FW";
638 	case GFX_CMD_ID_DESTROY_TMR:
639 		return "DESTROY_TMR";
640 	case GFX_CMD_ID_SAVE_RESTORE:
641 		return "SAVE_RESTORE_IP_FW";
642 	case GFX_CMD_ID_SETUP_VMR:
643 		return "SETUP_VMR";
644 	case GFX_CMD_ID_DESTROY_VMR:
645 		return "DESTROY_VMR";
646 	case GFX_CMD_ID_PROG_REG:
647 		return "PROG_REG";
648 	case GFX_CMD_ID_GET_FW_ATTESTATION:
649 		return "GET_FW_ATTESTATION";
650 	case GFX_CMD_ID_LOAD_TOC:
651 		return "ID_LOAD_TOC";
652 	case GFX_CMD_ID_AUTOLOAD_RLC:
653 		return "AUTOLOAD_RLC";
654 	case GFX_CMD_ID_BOOT_CFG:
655 		return "BOOT_CFG";
656 	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
657 		return "CONFIG_SQ_PERFMON";
658 	default:
659 		return "UNKNOWN CMD";
660 	}
661 }
662 
663 static bool psp_err_warn(struct psp_context *psp)
664 {
665 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
666 
667 	/* This response indicates reg list is already loaded */
668 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
669 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
670 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
671 	    cmd->resp.status == TEE_ERROR_CANCEL)
672 		return false;
673 
674 	return true;
675 }
676 
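/* Copy a GFX command into the ring command buffer, submit it with a new fence value and
 * busy-wait for PSP to write that value back; a non-zero response status is warned about,
 * and turned into a failure for SRIOV firmware loads or on timeout.
 */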
677 static int
678 psp_cmd_submit_buf(struct psp_context *psp,
679 		   struct amdgpu_firmware_info *ucode,
680 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
681 {
682 	int ret;
683 	int index;
684 	int timeout = psp->adev->psp_timeout;
685 	bool ras_intr = false;
686 	bool skip_unsupport = false;
687 
688 	if (psp->adev->no_hw_access)
689 		return 0;
690 
691 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
692 
693 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
694 
695 	index = atomic_inc_return(&psp->fence_value);
696 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
697 	if (ret) {
698 		atomic_dec(&psp->fence_value);
699 		goto exit;
700 	}
701 
702 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
703 	while (*((unsigned int *)psp->fence_buf) != index) {
704 		if (--timeout == 0)
705 			break;
706 		/*
707 		 * Don't wait for the timeout when err_event_athub occurs,
708 		 * because the gpu reset thread has been triggered and the locked
709 		 * resources should be released for the psp resume sequence.
710 		 */
711 		ras_intr = amdgpu_ras_intr_triggered();
712 		if (ras_intr)
713 			break;
714 		usleep_range(10, 100);
715 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
716 	}
717 
718 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
719 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
720 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
721 
722 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
723 
724 	/* In some cases, the psp response status is not 0 even though there was
725 	 * no problem while the command was submitted. Some versions of PSP FW
726 	 * don't write 0 to that field.
727 	 * So here we only print a warning instead of an error during psp
728 	 * initialization to avoid breaking hw_init, and we don't
729 	 * return -EINVAL.
730 	 */
731 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
732 		if (ucode)
733 			dev_warn(psp->adev->dev,
734 				 "failed to load ucode %s(0x%X) ",
735 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
736 		if (psp_err_warn(psp))
737 			dev_warn(
738 				psp->adev->dev,
739 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
740 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
741 				psp->cmd_buf_mem->cmd_id,
742 				psp->cmd_buf_mem->resp.status);
743 		/* If any firmware (including CAP) load fails under SRIOV, it should
744 		 * return failure to stop the VF from initializing.
745 		 * Also return failure in case of timeout
746 		 */
747 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
748 			ret = -EINVAL;
749 			goto exit;
750 		}
751 	}
752 
753 	if (ucode) {
754 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
755 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
756 	}
757 
758 exit:
759 	return ret;
760 }
761 
762 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
763 {
764 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
765 
766 	mutex_lock(&psp->mutex);
767 
768 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
769 
770 	return cmd;
771 }
772 
773 static void release_psp_cmd_buf(struct psp_context *psp)
774 {
775 	mutex_unlock(&psp->mutex);
776 }
777 
778 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
779 				 struct psp_gfx_cmd_resp *cmd,
780 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
781 {
782 	struct amdgpu_device *adev = psp->adev;
783 	uint32_t size = 0;
784 	uint64_t tmr_pa = 0;
785 
786 	if (tmr_bo) {
787 		size = amdgpu_bo_size(tmr_bo);
788 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
789 	}
790 
791 	if (amdgpu_sriov_vf(psp->adev))
792 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
793 	else
794 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
795 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
796 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
797 	cmd->cmd.cmd_setup_tmr.buf_size = size;
798 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
799 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
800 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
801 }
802 
803 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
804 				      uint64_t pri_buf_mc, uint32_t size)
805 {
806 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
807 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
808 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
809 	cmd->cmd.cmd_load_toc.toc_size = size;
810 }
811 
812 /* Issue the LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
813 static int psp_load_toc(struct psp_context *psp,
814 			uint32_t *tmr_size)
815 {
816 	int ret;
817 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
818 
819 	/* Copy toc to psp firmware private buffer */
820 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
821 
822 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
823 
824 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
825 				 psp->fence_buf_mc_addr);
826 	if (!ret)
827 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
828 
829 	release_psp_cmd_buf(psp);
830 
831 	return ret;
832 }
833 
834 /* Set up Trusted Memory Region */
835 static int psp_tmr_init(struct psp_context *psp)
836 {
837 	int ret = 0;
838 	int tmr_size;
839 	void *tmr_buf;
840 	void **pptr;
841 
842 	/*
843 	 * According to the HW engineers, the TMR address should be "naturally
844 	 * aligned", i.e. the start address is an integer multiple of the TMR size.
845 	 *
846 	 * Note: this memory needs to be reserved until the driver is
847 	 * uninitialized.
848 	 */
849 	tmr_size = PSP_TMR_SIZE(psp->adev);
850 
851 	/* For ASICs that support RLC autoload, psp will parse the toc
852 	 * and calculate the total TMR size needed
853 	 */
854 	if (!amdgpu_sriov_vf(psp->adev) &&
855 	    psp->toc.start_addr &&
856 	    psp->toc.size_bytes &&
857 	    psp->fw_pri_buf) {
858 		ret = psp_load_toc(psp, &tmr_size);
859 		if (ret) {
860 			dev_err(psp->adev->dev, "Failed to load toc\n");
861 			return ret;
862 		}
863 	}
864 
865 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
866 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
867 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
868 					      PSP_TMR_ALIGNMENT,
869 					      AMDGPU_HAS_VRAM(psp->adev) ?
870 					      AMDGPU_GEM_DOMAIN_VRAM :
871 					      AMDGPU_GEM_DOMAIN_GTT,
872 					      &psp->tmr_bo, &psp->tmr_mc_addr,
873 					      pptr);
874 	}
875 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
876 		psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
877 
878 	return ret;
879 }
880 
881 static bool psp_skip_tmr(struct psp_context *psp)
882 {
883 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
884 	case IP_VERSION(11, 0, 9):
885 	case IP_VERSION(11, 0, 7):
886 	case IP_VERSION(13, 0, 2):
887 	case IP_VERSION(13, 0, 6):
888 	case IP_VERSION(13, 0, 10):
889 	case IP_VERSION(13, 0, 12):
890 	case IP_VERSION(13, 0, 14):
891 		return true;
892 	default:
893 		return false;
894 	}
895 }
896 
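/* Hand the reserved TMR region to PSP via SETUP_TMR (SETUP_VMR under SRIOV) */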
897 static int psp_tmr_load(struct psp_context *psp)
898 {
899 	int ret;
900 	struct psp_gfx_cmd_resp *cmd;
901 
902 	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
903 	 * Already set up by host driver.
904 	 */
905 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
906 		return 0;
907 
908 	cmd = acquire_psp_cmd_buf(psp);
909 
910 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
911 	if (psp->tmr_bo)
912 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
913 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
914 
915 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
916 				 psp->fence_buf_mc_addr);
917 
918 	release_psp_cmd_buf(psp);
919 
920 	return ret;
921 }
922 
923 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
924 					struct psp_gfx_cmd_resp *cmd)
925 {
926 	if (amdgpu_sriov_vf(psp->adev))
927 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
928 	else
929 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
930 }
931 
932 static int psp_tmr_unload(struct psp_context *psp)
933 {
934 	int ret;
935 	struct psp_gfx_cmd_resp *cmd;
936 
937 	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
938 	 * as TMR is not loaded at all
939 	 */
940 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
941 		return 0;
942 
943 	cmd = acquire_psp_cmd_buf(psp);
944 
945 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
946 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
947 
948 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
949 				 psp->fence_buf_mc_addr);
950 
951 	release_psp_cmd_buf(psp);
952 
953 	return ret;
954 }
955 
956 static int psp_tmr_terminate(struct psp_context *psp)
957 {
958 	return psp_tmr_unload(psp);
959 }
960 
961 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
962 					uint64_t *output_ptr)
963 {
964 	int ret;
965 	struct psp_gfx_cmd_resp *cmd;
966 
967 	if (!output_ptr)
968 		return -EINVAL;
969 
970 	if (amdgpu_sriov_vf(psp->adev))
971 		return 0;
972 
973 	cmd = acquire_psp_cmd_buf(psp);
974 
975 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
976 
977 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
978 				 psp->fence_buf_mc_addr);
979 
980 	if (!ret) {
981 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
982 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
983 	}
984 
985 	release_psp_cmd_buf(psp);
986 
987 	return ret;
988 }
989 
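/* Query the PSP boot config and report whether the GECC feature bit is set */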
990 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
991 {
992 	struct psp_context *psp = &adev->psp;
993 	struct psp_gfx_cmd_resp *cmd;
994 	int ret;
995 
996 	if (amdgpu_sriov_vf(adev))
997 		return 0;
998 
999 	cmd = acquire_psp_cmd_buf(psp);
1000 
1001 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1002 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
1003 
1004 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1005 	if (!ret) {
1006 		*boot_cfg =
1007 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1008 	}
1009 
1010 	release_psp_cmd_buf(psp);
1011 
1012 	return ret;
1013 }
1014 
1015 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1016 {
1017 	int ret;
1018 	struct psp_context *psp = &adev->psp;
1019 	struct psp_gfx_cmd_resp *cmd;
1020 
1021 	if (amdgpu_sriov_vf(adev))
1022 		return 0;
1023 
1024 	cmd = acquire_psp_cmd_buf(psp);
1025 
1026 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1027 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1028 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1029 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1030 
1031 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1032 
1033 	release_psp_cmd_buf(psp);
1034 
1035 	return ret;
1036 }
1037 
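/* Load the optional register-list firmware (GFX_FW_TYPE_REG_LIST) through the LOAD_IP_FW command */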
1038 static int psp_rl_load(struct amdgpu_device *adev)
1039 {
1040 	int ret;
1041 	struct psp_context *psp = &adev->psp;
1042 	struct psp_gfx_cmd_resp *cmd;
1043 
1044 	if (!is_psp_fw_valid(psp->rl))
1045 		return 0;
1046 
1047 	cmd = acquire_psp_cmd_buf(psp);
1048 
1049 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1050 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1051 
1052 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1053 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1054 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1055 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1056 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1057 
1058 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1059 
1060 	release_psp_cmd_buf(psp);
1061 
1062 	return ret;
1063 }
1064 
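/* Ask PSP to switch the framebuffer NPS (memory partition) mode */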
1065 int psp_memory_partition(struct psp_context *psp, int mode)
1066 {
1067 	struct psp_gfx_cmd_resp *cmd;
1068 	int ret;
1069 
1070 	if (amdgpu_sriov_vf(psp->adev))
1071 		return 0;
1072 
1073 	cmd = acquire_psp_cmd_buf(psp);
1074 
1075 	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1076 	cmd->cmd.cmd_memory_part.mode = mode;
1077 
1078 	dev_info(psp->adev->dev,
1079 		 "Requesting %d memory partition change through PSP", mode);
1080 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1081 	if (ret)
1082 		dev_err(psp->adev->dev,
1083 			"PSP request failed to change to NPS%d mode\n", mode);
1084 
1085 	release_psp_cmd_buf(psp);
1086 
1087 	return ret;
1088 }
1089 
1090 int psp_spatial_partition(struct psp_context *psp, int mode)
1091 {
1092 	struct psp_gfx_cmd_resp *cmd;
1093 	int ret;
1094 
1095 	if (amdgpu_sriov_vf(psp->adev))
1096 		return 0;
1097 
1098 	cmd = acquire_psp_cmd_buf(psp);
1099 
1100 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1101 	cmd->cmd.cmd_spatial_part.mode = mode;
1102 
1103 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1104 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1105 
1106 	release_psp_cmd_buf(psp);
1107 
1108 	return ret;
1109 }
1110 
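/* Load the ASD firmware; skipped under SRIOV, when the binary is missing, or when
 * display hardware is absent on MP0 13.0.10 and newer.
 */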
1111 static int psp_asd_initialize(struct psp_context *psp)
1112 {
1113 	int ret;
1114 
1115 	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
1116 	 * Add a workaround to bypass it for sriov for now.
1117 	 * TODO: add a version check to make this common
1118 	 */
1119 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1120 		return 0;
1121 
1122 	/* bypass asd if display hardware is not available */
1123 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1124 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1125 		return 0;
1126 
1127 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1128 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1129 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1130 
1131 	ret = psp_ta_load(psp, &psp->asd_context);
1132 	if (!ret)
1133 		psp->asd_context.initialized = true;
1134 
1135 	return ret;
1136 }
1137 
1138 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1139 				       uint32_t session_id)
1140 {
1141 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1142 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1143 }
1144 
1145 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1146 {
1147 	int ret;
1148 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1149 
1150 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1151 
1152 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1153 
1154 	context->resp_status = cmd->resp.status;
1155 
1156 	release_psp_cmd_buf(psp);
1157 
1158 	return ret;
1159 }
1160 
1161 static int psp_asd_terminate(struct psp_context *psp)
1162 {
1163 	int ret;
1164 
1165 	if (amdgpu_sriov_vf(psp->adev))
1166 		return 0;
1167 
1168 	if (!psp->asd_context.initialized)
1169 		return 0;
1170 
1171 	ret = psp_ta_unload(psp, &psp->asd_context);
1172 	if (!ret)
1173 		psp->asd_context.initialized = false;
1174 
1175 	return ret;
1176 }
1177 
1178 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1179 		uint32_t id, uint32_t value)
1180 {
1181 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1182 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1183 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1184 }
1185 
1186 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1187 		uint32_t value)
1188 {
1189 	struct psp_gfx_cmd_resp *cmd;
1190 	int ret = 0;
1191 
1192 	if (reg >= PSP_REG_LAST)
1193 		return -EINVAL;
1194 
1195 	cmd = acquire_psp_cmd_buf(psp);
1196 
1197 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1198 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1199 	if (ret)
1200 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1201 
1202 	release_psp_cmd_buf(psp);
1203 
1204 	return ret;
1205 }
1206 
1207 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1208 				     uint64_t ta_bin_mc,
1209 				     struct ta_context *context)
1210 {
1211 	cmd->cmd_id				= context->ta_load_type;
1212 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1213 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1214 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1215 
1216 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1217 		lower_32_bits(context->mem_context.shared_mc_addr);
1218 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1219 		upper_32_bits(context->mem_context.shared_mc_addr);
1220 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1221 }
1222 
1223 int psp_ta_init_shared_buf(struct psp_context *psp,
1224 				  struct ta_mem_context *mem_ctx)
1225 {
1226 	/*
1227 	 * Allocate 16k of memory aligned to 4k from the frame buffer (local
1228 	 * physical) for the ta / host shared memory
1229 	 */
1230 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1231 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1232 				      AMDGPU_GEM_DOMAIN_GTT,
1233 				      &mem_ctx->shared_bo,
1234 				      &mem_ctx->shared_mc_addr,
1235 				      &mem_ctx->shared_buf);
1236 }
1237 
1238 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1239 				       uint32_t ta_cmd_id,
1240 				       uint32_t session_id)
1241 {
1242 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1243 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1244 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1245 }
1246 
1247 int psp_ta_invoke(struct psp_context *psp,
1248 		  uint32_t ta_cmd_id,
1249 		  struct ta_context *context)
1250 {
1251 	int ret;
1252 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1253 
1254 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1255 
1256 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1257 				 psp->fence_buf_mc_addr);
1258 
1259 	context->resp_status = cmd->resp.status;
1260 
1261 	release_psp_cmd_buf(psp);
1262 
1263 	return ret;
1264 }
1265 
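/* Copy the TA binary into the firmware private buffer, issue the load command and
 * record the resulting session id and response status.
 */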
1266 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1267 {
1268 	int ret;
1269 	struct psp_gfx_cmd_resp *cmd;
1270 
1271 	cmd = acquire_psp_cmd_buf(psp);
1272 
1273 	psp_copy_fw(psp, context->bin_desc.start_addr,
1274 		    context->bin_desc.size_bytes);
1275 
1276 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
1277 		context->mem_context.shared_bo)
1278 		context->mem_context.shared_mc_addr =
1279 			amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
1280 
1281 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1282 
1283 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1284 				 psp->fence_buf_mc_addr);
1285 
1286 	context->resp_status = cmd->resp.status;
1287 
1288 	if (!ret)
1289 		context->session_id = cmd->resp.session_id;
1290 
1291 	release_psp_cmd_buf(psp);
1292 
1293 	return ret;
1294 }
1295 
1296 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1297 {
1298 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1299 }
1300 
1301 int psp_xgmi_terminate(struct psp_context *psp)
1302 {
1303 	int ret;
1304 	struct amdgpu_device *adev = psp->adev;
1305 
1306 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1307 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1308 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1309 	     adev->gmc.xgmi.connected_to_cpu))
1310 		return 0;
1311 
1312 	if (!psp->xgmi_context.context.initialized)
1313 		return 0;
1314 
1315 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1316 
1317 	psp->xgmi_context.context.initialized = false;
1318 
1319 	return ret;
1320 }
1321 
1322 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1323 {
1324 	struct ta_xgmi_shared_memory *xgmi_cmd;
1325 	int ret;
1326 
1327 	if (!psp->ta_fw ||
1328 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1329 	    !psp->xgmi_context.context.bin_desc.start_addr)
1330 		return -ENOENT;
1331 
1332 	if (!load_ta)
1333 		goto invoke;
1334 
1335 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1336 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1337 
1338 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1339 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1340 		if (ret)
1341 			return ret;
1342 	}
1343 
1344 	/* Load XGMI TA */
1345 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1346 	if (!ret)
1347 		psp->xgmi_context.context.initialized = true;
1348 	else
1349 		return ret;
1350 
1351 invoke:
1352 	/* Initialize XGMI session */
1353 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1354 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1355 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1356 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1357 
1358 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1359 	/* note down the capability flag for XGMI TA */
1360 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1361 
1362 	return ret;
1363 }
1364 
1365 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1366 {
1367 	struct ta_xgmi_shared_memory *xgmi_cmd;
1368 	int ret;
1369 
1370 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1371 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1372 
1373 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1374 
1375 	/* Invoke xgmi ta to get hive id */
1376 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1377 	if (ret)
1378 		return ret;
1379 
1380 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1381 
1382 	return 0;
1383 }
1384 
1385 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1386 {
1387 	struct ta_xgmi_shared_memory *xgmi_cmd;
1388 	int ret;
1389 
1390 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1391 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1392 
1393 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1394 
1395 	/* Invoke xgmi ta to get the node id */
1396 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1397 	if (ret)
1398 		return ret;
1399 
1400 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1401 
1402 	return 0;
1403 }
1404 
1405 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1406 {
1407 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1408 			IP_VERSION(13, 0, 2) &&
1409 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1410 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1411 		       IP_VERSION(13, 0, 6);
1412 }
1413 
1414 /*
1415  * Chips that support extended topology information require the driver to
1416  * reflect topology information in the opposite direction.  This is
1417  * because the TA has already exceeded its link record limit and if the
1418  * TA holds bi-directional information, the driver would have to do
1419  * multiple fetches instead of just two.
1420  */
1421 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1422 					struct psp_xgmi_node_info node_info)
1423 {
1424 	struct amdgpu_device *mirror_adev;
1425 	struct amdgpu_hive_info *hive;
1426 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1427 	uint64_t dst_node_id = node_info.node_id;
1428 	uint8_t dst_num_hops = node_info.num_hops;
1429 	uint8_t dst_num_links = node_info.num_links;
1430 
1431 	hive = amdgpu_get_xgmi_hive(psp->adev);
1432 	if (WARN_ON(!hive))
1433 		return;
1434 
1435 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1436 		struct psp_xgmi_topology_info *mirror_top_info;
1437 		int j;
1438 
1439 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1440 			continue;
1441 
1442 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1443 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1444 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1445 				continue;
1446 
1447 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1448 			/*
1449 			 * prevent re-reflection of a 0 num_links value since the
1450 			 * reflection criterion is based on num_hops (direct or
1451 			 * indirect).
1452 			 */
1453 			if (dst_num_links)
1454 				mirror_top_info->nodes[j].num_links = dst_num_links;
1455 
1456 			break;
1457 		}
1458 
1459 		break;
1460 	}
1461 
1462 	amdgpu_put_xgmi_hive(hive);
1463 }
1464 
1465 int psp_xgmi_get_topology_info(struct psp_context *psp,
1466 			       int number_devices,
1467 			       struct psp_xgmi_topology_info *topology,
1468 			       bool get_extended_data)
1469 {
1470 	struct ta_xgmi_shared_memory *xgmi_cmd;
1471 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1472 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1473 	int i;
1474 	int ret;
1475 
1476 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1477 		return -EINVAL;
1478 
1479 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1480 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1481 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1482 
1483 	/* Fill in the shared memory with topology information as input */
1484 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1485 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1486 	topology_info_input->num_nodes = number_devices;
1487 
1488 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1489 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1490 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1491 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1492 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1493 	}
1494 
1495 	/* Invoke xgmi ta to get the topology information */
1496 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1497 	if (ret)
1498 		return ret;
1499 
1500 	/* Read the output topology information from the shared memory */
1501 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1502 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1503 	for (i = 0; i < topology->num_nodes; i++) {
1504 		/* extended data will either be 0 or equal to non-extended data */
1505 		if (topology_info_output->nodes[i].num_hops)
1506 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1507 
1508 		/* non-extended data gets everything here so no need to update */
1509 		if (!get_extended_data) {
1510 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1511 			topology->nodes[i].is_sharing_enabled =
1512 					topology_info_output->nodes[i].is_sharing_enabled;
1513 			topology->nodes[i].sdma_engine =
1514 					topology_info_output->nodes[i].sdma_engine;
1515 		}
1516 
1517 	}
1518 
1519 	/* Invoke xgmi ta again to get the link information */
1520 	if (psp_xgmi_peer_link_info_supported(psp)) {
1521 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1522 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1523 		bool requires_reflection =
1524 			(psp->xgmi_context.supports_extended_data &&
1525 			 get_extended_data) ||
1526 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1527 				IP_VERSION(13, 0, 6) ||
1528 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1529 				IP_VERSION(13, 0, 14);
1530 		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1531 				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1532 
1533 		/* populate the shared output buffer rather than the cmd input buffer
1534 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1535 		 * This is required by the xgmi ta implementation of GET_PEER_LINKS.
1536 		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
1537 		 */
1538 		if (ta_port_num_support) {
1539 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1540 
1541 			for (i = 0; i < topology->num_nodes; i++)
1542 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1543 
1544 			link_extend_info_output->num_nodes = topology->num_nodes;
1545 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1546 		} else {
1547 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1548 
1549 			for (i = 0; i < topology->num_nodes; i++)
1550 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1551 
1552 			link_info_output->num_nodes = topology->num_nodes;
1553 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1554 		}
1555 
1556 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1557 		if (ret)
1558 			return ret;
1559 
1560 		for (i = 0; i < topology->num_nodes; i++) {
1561 			uint8_t node_num_links = ta_port_num_support ?
1562 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1563 			/* accumulate num_links on extended data */
1564 			if (get_extended_data) {
1565 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1566 			} else {
1567 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1568 								topology->nodes[i].num_links : node_num_links;
1569 			}
1570 			/* populate the connected port num info if supported and available */
1571 			if (ta_port_num_support && topology->nodes[i].num_links) {
1572 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1573 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1574 			}
1575 
1576 			/* reflect the topology information for bi-directionality */
1577 			if (requires_reflection && topology->nodes[i].num_hops)
1578 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1579 		}
1580 	}
1581 
1582 	return 0;
1583 }
1584 
1585 int psp_xgmi_set_topology_info(struct psp_context *psp,
1586 			       int number_devices,
1587 			       struct psp_xgmi_topology_info *topology)
1588 {
1589 	struct ta_xgmi_shared_memory *xgmi_cmd;
1590 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1591 	int i;
1592 
1593 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1594 		return -EINVAL;
1595 
1596 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1597 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1598 
1599 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1600 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1601 	topology_info_input->num_nodes = number_devices;
1602 
1603 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1604 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1605 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1606 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1607 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1608 	}
1609 
1610 	/* Invoke xgmi ta to set topology information */
1611 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1612 }
1613 
1614 // ras begin
1615 static void psp_ras_ta_check_status(struct psp_context *psp)
1616 {
1617 	struct ta_ras_shared_memory *ras_cmd =
1618 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1619 
1620 	switch (ras_cmd->ras_status) {
1621 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1622 		dev_warn(psp->adev->dev,
1623 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1624 		break;
1625 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1626 		dev_warn(psp->adev->dev,
1627 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1628 		break;
1629 	case TA_RAS_STATUS__SUCCESS:
1630 		break;
1631 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1632 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1633 			dev_warn(psp->adev->dev,
1634 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1635 		break;
1636 	default:
1637 		dev_warn(psp->adev->dev,
1638 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1639 		break;
1640 	}
1641 }
1642 
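/* Marshal a RAS TA command into the shared buffer, invoke the RAS TA and copy the
 * output back, all under the ras context mutex.
 */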
1643 static int psp_ras_send_cmd(struct psp_context *psp,
1644 		enum ras_command cmd_id, void *in, void *out)
1645 {
1646 	struct ta_ras_shared_memory *ras_cmd;
1647 	uint32_t cmd = cmd_id;
1648 	int ret = 0;
1649 
1650 	if (!in)
1651 		return -EINVAL;
1652 
1653 	mutex_lock(&psp->ras_context.mutex);
1654 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1655 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1656 
1657 	switch (cmd) {
1658 	case TA_RAS_COMMAND__ENABLE_FEATURES:
1659 	case TA_RAS_COMMAND__DISABLE_FEATURES:
1660 		memcpy(&ras_cmd->ras_in_message,
1661 			in, sizeof(ras_cmd->ras_in_message));
1662 		break;
1663 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1664 		memcpy(&ras_cmd->ras_in_message.trigger_error,
1665 			in, sizeof(ras_cmd->ras_in_message.trigger_error));
1666 		break;
1667 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1668 		memcpy(&ras_cmd->ras_in_message.address,
1669 			in, sizeof(ras_cmd->ras_in_message.address));
1670 		break;
1671 	default:
1672 		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1673 		ret = -EINVAL;
1674 		goto err_out;
1675 	}
1676 
1677 	ras_cmd->cmd_id = cmd;
1678 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1679 
1680 	switch (cmd) {
1681 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1682 		if (!ret && out)
1683 			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1684 		break;
1685 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1686 		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1687 			ret = -EINVAL;
1688 		else if (out)
1689 			memcpy(out,
1690 				&ras_cmd->ras_out_message.address,
1691 				sizeof(ras_cmd->ras_out_message.address));
1692 		break;
1693 	default:
1694 		break;
1695 	}
1696 
1697 err_out:
1698 	mutex_unlock(&psp->ras_context.mutex);
1699 
1700 	return ret;
1701 }
1702 
1703 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1704 {
1705 	struct ta_ras_shared_memory *ras_cmd;
1706 	int ret;
1707 
1708 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1709 
1710 	/*
1711 	 * TODO: bypass the loading in sriov for now
1712 	 */
1713 	if (amdgpu_sriov_vf(psp->adev))
1714 		return 0;
1715 
1716 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1717 
1718 	if (amdgpu_ras_intr_triggered())
1719 		return ret;
1720 
1721 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1722 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1723 		return -EINVAL;
1724 	}
1725 
1726 	if (!ret) {
1727 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1728 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1729 
1730 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1731 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1732 			dev_warn(psp->adev->dev,
1733 				 "RAS internal register access blocked\n");
1734 
1735 		psp_ras_ta_check_status(psp);
1736 	}
1737 
1738 	return ret;
1739 }
1740 
1741 int psp_ras_enable_features(struct psp_context *psp,
1742 		union ta_ras_cmd_input *info, bool enable)
1743 {
1744 	enum ras_command cmd_id;
1745 	int ret;
1746 
1747 	if (!psp->ras_context.context.initialized || !info)
1748 		return -EINVAL;
1749 
1750 	cmd_id = enable ?
1751 		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1752 	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1753 	if (ret)
1754 		return -EINVAL;
1755 
1756 	return 0;
1757 }
1758 
1759 int psp_ras_terminate(struct psp_context *psp)
1760 {
1761 	int ret;
1762 
1763 	/*
1764 	 * TODO: bypass the terminate in sriov for now
1765 	 */
1766 	if (amdgpu_sriov_vf(psp->adev))
1767 		return 0;
1768 
1769 	if (!psp->ras_context.context.initialized)
1770 		return 0;
1771 
1772 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1773 
1774 	psp->ras_context.context.initialized = false;
1775 
1776 	mutex_destroy(&psp->ras_context.mutex);
1777 
1778 	return ret;
1779 }
1780 
1781 int psp_ras_initialize(struct psp_context *psp)
1782 {
1783 	int ret;
1784 	uint32_t boot_cfg = 0xFF;
1785 	struct amdgpu_device *adev = psp->adev;
1786 	struct ta_ras_shared_memory *ras_cmd;
1787 
1788 	/*
1789 	 * TODO: bypass the initialize in sriov for now
1790 	 */
1791 	if (amdgpu_sriov_vf(adev))
1792 		return 0;
1793 
1794 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1795 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1796 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1797 		return 0;
1798 	}
1799 
1800 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1801 		/* Query GECC enablement status from the boot config:
1802 		 * boot_cfg 1 means GECC is enabled, 0 means it is disabled.
1803 		 */
1804 		ret = psp_boot_config_get(adev, &boot_cfg);
1805 		if (ret)
1806 			dev_warn(adev->dev, "PSP get boot config failed\n");
1807 
1808 		if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
1809 		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1810 			dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
1811 			dev_warn(adev->dev,
1812 				"To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
1813 		} else {
1814 			if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
1815 				amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1816 				if (boot_cfg == 1) {
1817 					dev_info(adev->dev, "GECC is enabled\n");
1818 				} else {
1819 					/* enable GECC in the next boot cycle if it is disabled
1820 					 * in the boot config, or force-enable GECC if reading
1821 					 * the boot configuration failed
1822 					 */
1823 					ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1824 					if (ret)
1825 						dev_warn(adev->dev, "PSP set boot config failed\n");
1826 					else
1827 						dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1828 				}
1829 			} else {
1830 				if (!boot_cfg) {
1831 					if (!adev->ras_default_ecc_enabled &&
1832 					    amdgpu_ras_enable != 1 &&
1833 					    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1834 						dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
1835 					else
1836 						dev_info(adev->dev, "GECC is disabled\n");
1837 				} else {
1838 					/* disable GECC in the next boot cycle if RAS is
1839 					 * disabled by the module parameters amdgpu_ras_enable
1840 					 * and/or amdgpu_ras_mask, or if the boot_config_get
1841 					 * call failed
1842 					 */
1843 					ret = psp_boot_config_set(adev, 0);
1844 					if (ret)
1845 						dev_warn(adev->dev, "PSP set boot config failed\n");
1846 					else
1847 						dev_warn(adev->dev, "GECC will be disabled in the next boot cycle if amdgpu_ras_enable and/or amdgpu_ras_mask is set to 0x0\n");
1848 				}
1849 			}
1850 		}
1851 	}
1852 
1853 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1854 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1855 
1856 	if (!psp->ras_context.context.mem_context.shared_buf) {
1857 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1858 		if (ret)
1859 			return ret;
1860 	}
1861 
1862 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1863 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1864 
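	/* Describe the device to the RAS TA before loading it: poison mode, dGPU vs APU,
	 * XCC mask, disabled channel count, memory (NPS) partition mode and active UMC mask.
	 */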
1865 	if (amdgpu_ras_is_poison_mode_supported(adev))
1866 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1867 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1868 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1869 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1870 		adev->gfx.xcc_mask;
1871 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1872 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1873 		ras_cmd->ras_in_message.init_flags.nps_mode =
1874 			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1875 	ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
1876 
1877 	ret = psp_ta_load(psp, &psp->ras_context.context);
1878 
1879 	if (!ret && !ras_cmd->ras_status) {
1880 		psp->ras_context.context.initialized = true;
1881 		mutex_init(&psp->ras_context.mutex);
1882 	} else {
1883 		if (ras_cmd->ras_status)
1884 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1885 
1886 		/* failed to load the RAS TA */
1887 		psp->ras_context.context.initialized = false;
1888 	}
1889 
1890 	return ret;
1891 }
1892 
1893 int psp_ras_trigger_error(struct psp_context *psp,
1894 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1895 {
1896 	struct amdgpu_device *adev = psp->adev;
1897 	int ret;
1898 	uint32_t dev_mask;
1899 	uint32_t ras_status = 0;
1900 
1901 	if (!psp->ras_context.context.initialized || !info)
1902 		return -EINVAL;
1903 
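	/* Translate the caller's instance mask into the per-block device mask the TA expects */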
1904 	switch (info->block_id) {
1905 	case TA_RAS_BLOCK__GFX:
1906 		dev_mask = GET_MASK(GC, instance_mask);
1907 		break;
1908 	case TA_RAS_BLOCK__SDMA:
1909 		dev_mask = GET_MASK(SDMA0, instance_mask);
1910 		break;
1911 	case TA_RAS_BLOCK__VCN:
1912 	case TA_RAS_BLOCK__JPEG:
1913 		dev_mask = GET_MASK(VCN, instance_mask);
1914 		break;
1915 	default:
1916 		dev_mask = instance_mask;
1917 		break;
1918 	}
1919 
1920 	/* reuse sub_block_index for backward compatibility */
1921 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1922 	dev_mask &= AMDGPU_RAS_INST_MASK;
1923 	info->sub_block_index |= dev_mask;
1924 
1925 	ret = psp_ras_send_cmd(psp,
1926 			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
1927 	if (ret)
1928 		return -EINVAL;
1929 
1930 	/* If err_event_athub occurs, the error injection was successful;
1931 	 * however, the return status from the TA is no longer reliable.
1932 	 */
1933 	if (amdgpu_ras_intr_triggered())
1934 		return 0;
1935 
1936 	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1937 		return -EACCES;
1938 	else if (ras_status)
1939 		return -EINVAL;
1940 
1941 	return 0;
1942 }
1943 
1944 int psp_ras_query_address(struct psp_context *psp,
1945 			  struct ta_ras_query_address_input *addr_in,
1946 			  struct ta_ras_query_address_output *addr_out)
1947 {
1948 	int ret;
1949 
1950 	if (!psp->ras_context.context.initialized ||
1951 		!addr_in || !addr_out)
1952 		return -EINVAL;
1953 
1954 	ret = psp_ras_send_cmd(psp,
1955 			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
1956 
1957 	return ret;
1958 }
1959 // ras end
1960 
1961 // HDCP start
1962 static int psp_hdcp_initialize(struct psp_context *psp)
1963 {
1964 	int ret;
1965 
1966 	/*
1967 	 * TODO: bypass the initialize in sriov for now
1968 	 */
1969 	if (amdgpu_sriov_vf(psp->adev))
1970 		return 0;
1971 
1972 	/* bypass hdcp initialization if dmu is harvested */
1973 	if (!amdgpu_device_has_display_hardware(psp->adev))
1974 		return 0;
1975 
1976 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1977 	    !psp->hdcp_context.context.bin_desc.start_addr) {
1978 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1979 		return 0;
1980 	}
1981 
1982 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1983 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1984 
1985 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
1986 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1987 		if (ret)
1988 			return ret;
1989 	}
1990 
1991 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
1992 	if (!ret) {
1993 		psp->hdcp_context.context.initialized = true;
1994 		mutex_init(&psp->hdcp_context.mutex);
1995 	}
1996 
1997 	return ret;
1998 }
1999 
2000 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2001 {
2002 	/*
2003 	 * TODO: bypass the loading in sriov for now
2004 	 */
2005 	if (amdgpu_sriov_vf(psp->adev))
2006 		return 0;
2007 
2008 	if (!psp->hdcp_context.context.initialized)
2009 		return 0;
2010 
2011 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
2012 }
2013 
2014 static int psp_hdcp_terminate(struct psp_context *psp)
2015 {
2016 	int ret;
2017 
2018 	/*
2019 	 * TODO: bypass the terminate in sriov for now
2020 	 */
2021 	if (amdgpu_sriov_vf(psp->adev))
2022 		return 0;
2023 
2024 	if (!psp->hdcp_context.context.initialized)
2025 		return 0;
2026 
2027 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2028 
2029 	psp->hdcp_context.context.initialized = false;
2030 
2031 	return ret;
2032 }
2033 // HDCP end
2034 
2035 // DTM start
2036 static int psp_dtm_initialize(struct psp_context *psp)
2037 {
2038 	int ret;
2039 
2040 	/*
2041 	 * TODO: bypass the initialize in sriov for now
2042 	 */
2043 	if (amdgpu_sriov_vf(psp->adev))
2044 		return 0;
2045 
2046 	/* bypass dtm initialization if dmu is harvested */
2047 	if (!amdgpu_device_has_display_hardware(psp->adev))
2048 		return 0;
2049 
2050 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
2051 	    !psp->dtm_context.context.bin_desc.start_addr) {
2052 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2053 		return 0;
2054 	}
2055 
2056 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2057 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2058 
2059 	if (!psp->dtm_context.context.mem_context.shared_buf) {
2060 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2061 		if (ret)
2062 			return ret;
2063 	}
2064 
2065 	ret = psp_ta_load(psp, &psp->dtm_context.context);
2066 	if (!ret) {
2067 		psp->dtm_context.context.initialized = true;
2068 		mutex_init(&psp->dtm_context.mutex);
2069 	}
2070 
2071 	return ret;
2072 }
2073 
2074 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2075 {
2076 	/*
2077 	 * TODO: bypass the loading in sriov for now
2078 	 */
2079 	if (amdgpu_sriov_vf(psp->adev))
2080 		return 0;
2081 
2082 	if (!psp->dtm_context.context.initialized)
2083 		return 0;
2084 
2085 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2086 }
2087 
2088 static int psp_dtm_terminate(struct psp_context *psp)
2089 {
2090 	int ret;
2091 
2092 	/*
2093 	 * TODO: bypass the terminate in sriov for now
2094 	 */
2095 	if (amdgpu_sriov_vf(psp->adev))
2096 		return 0;
2097 
2098 	if (!psp->dtm_context.context.initialized)
2099 		return 0;
2100 
2101 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
2102 
2103 	psp->dtm_context.context.initialized = false;
2104 
2105 	return ret;
2106 }
2107 // DTM end
2108 
2109 // RAP start
2110 static int psp_rap_initialize(struct psp_context *psp)
2111 {
2112 	int ret;
2113 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2114 
2115 	/*
2116 	 * TODO: bypass the initialize in sriov for now
2117 	 */
2118 	if (amdgpu_sriov_vf(psp->adev))
2119 		return 0;
2120 
2121 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2122 	    !psp->rap_context.context.bin_desc.start_addr) {
2123 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2124 		return 0;
2125 	}
2126 
2127 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2128 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2129 
2130 	if (!psp->rap_context.context.mem_context.shared_buf) {
2131 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2132 		if (ret)
2133 			return ret;
2134 	}
2135 
2136 	ret = psp_ta_load(psp, &psp->rap_context.context);
2137 	if (!ret) {
2138 		psp->rap_context.context.initialized = true;
2139 		mutex_init(&psp->rap_context.mutex);
2140 	} else
2141 		return ret;
2142 
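	/* Ask the freshly loaded RAP TA to initialize itself; tear it down again if that fails */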
2143 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2144 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2145 		psp_rap_terminate(psp);
2146 		/* free rap shared memory */
2147 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2148 
2149 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2150 			 ret, status);
2151 
2152 		return ret;
2153 	}
2154 
2155 	return 0;
2156 }
2157 
2158 static int psp_rap_terminate(struct psp_context *psp)
2159 {
2160 	int ret;
2161 
2162 	if (!psp->rap_context.context.initialized)
2163 		return 0;
2164 
2165 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2166 
2167 	psp->rap_context.context.initialized = false;
2168 
2169 	return ret;
2170 }
2171 
2172 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2173 {
2174 	struct ta_rap_shared_memory *rap_cmd;
2175 	int ret = 0;
2176 
2177 	if (!psp->rap_context.context.initialized)
2178 		return 0;
2179 
2180 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2181 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2182 		return -EINVAL;
2183 
2184 	mutex_lock(&psp->rap_context.mutex);
2185 
2186 	rap_cmd = (struct ta_rap_shared_memory *)
2187 		  psp->rap_context.context.mem_context.shared_buf;
2188 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2189 
2190 	rap_cmd->cmd_id = ta_cmd_id;
2191 	rap_cmd->validation_method_id = METHOD_A;
2192 
2193 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2194 	if (ret)
2195 		goto out_unlock;
2196 
2197 	if (status)
2198 		*status = rap_cmd->rap_status;
2199 
2200 out_unlock:
2201 	mutex_unlock(&psp->rap_context.mutex);
2202 
2203 	return ret;
2204 }
2205 // RAP end
2206 
2207 /* securedisplay start */
2208 static int psp_securedisplay_initialize(struct psp_context *psp)
2209 {
2210 	int ret;
2211 	struct ta_securedisplay_cmd *securedisplay_cmd;
2212 
2213 	/*
2214 	 * TODO: bypass the initialize in sriov for now
2215 	 */
2216 	if (amdgpu_sriov_vf(psp->adev))
2217 		return 0;
2218 
2219 	/* bypass securedisplay initialization if dmu is harvested */
2220 	if (!amdgpu_device_has_display_hardware(psp->adev))
2221 		return 0;
2222 
2223 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2224 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2225 		dev_info(psp->adev->dev,
2226 			 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
2227 		return 0;
2228 	}
2229 
2230 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2231 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2232 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2233 
2234 	if (!psp->securedisplay_context.context.initialized) {
2235 		ret = psp_ta_init_shared_buf(psp,
2236 					     &psp->securedisplay_context.context.mem_context);
2237 		if (ret)
2238 			return ret;
2239 	}
2240 
2241 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2242 	if (!ret) {
2243 		psp->securedisplay_context.context.initialized = true;
2244 		mutex_init(&psp->securedisplay_context.mutex);
2245 	} else
2246 		return ret;
2247 
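	/* Query the newly loaded securedisplay TA to verify that it is functional */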
2248 	mutex_lock(&psp->securedisplay_context.mutex);
2249 
2250 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2251 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2252 
2253 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2254 
2255 	mutex_unlock(&psp->securedisplay_context.mutex);
2256 
2257 	if (ret) {
2258 		psp_securedisplay_terminate(psp);
2259 		/* free securedisplay shared memory */
2260 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2261 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2262 		return -EINVAL;
2263 	}
2264 
2265 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2266 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2267 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2268 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2269 		/* don't try again */
2270 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2271 	}
2272 
2273 	return 0;
2274 }
2275 
2276 static int psp_securedisplay_terminate(struct psp_context *psp)
2277 {
2278 	int ret;
2279 
2280 	/*
2281 	 * TODO: bypass the terminate in sriov for now
2282 	 */
2283 	if (amdgpu_sriov_vf(psp->adev))
2284 		return 0;
2285 
2286 	if (!psp->securedisplay_context.context.initialized)
2287 		return 0;
2288 
2289 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2290 
2291 	psp->securedisplay_context.context.initialized = false;
2292 
2293 	return ret;
2294 }
2295 
2296 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2297 {
2298 	int ret;
2299 
2300 	if (!psp->securedisplay_context.context.initialized)
2301 		return -EINVAL;
2302 
2303 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2304 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2305 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2306 		return -EINVAL;
2307 
2308 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2309 
2310 	return ret;
2311 }
2312 /* SECUREDISPLAY end */
2313 
2314 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2315 {
2316 	struct psp_context *psp = &adev->psp;
2317 	int ret = 0;
2318 
2319 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2320 		ret = psp->funcs->wait_for_bootloader(psp);
2321 
2322 	return ret;
2323 }
2324 
2325 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2326 {
2327 	if (psp->funcs &&
2328 	    psp->funcs->get_ras_capability) {
2329 		return psp->funcs->get_ras_capability(psp);
2330 	} else {
2331 		return false;
2332 	}
2333 }
2334 
2335 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2336 {
2337 	struct psp_context *psp = &adev->psp;
2338 
2339 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2340 		return false;
2341 
2342 	if (psp->funcs && psp->funcs->is_reload_needed)
2343 		return psp->funcs->is_reload_needed(psp);
2344 
2345 	return false;
2346 }
2347 
2348 static void psp_update_gpu_addresses(struct amdgpu_device *adev)
2349 {
2350 	struct psp_context *psp = &adev->psp;
2351 
2352 	if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
2353 		psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
2354 		psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
2355 		psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
2356 	}
2357 	if (adev->firmware.rbuf && psp->km_ring.ring_mem)
2358 		psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
2359 }
2360 
2361 static int psp_hw_start(struct psp_context *psp)
2362 {
2363 	struct amdgpu_device *adev = psp->adev;
2364 	int ret;
2365 
2366 	if (amdgpu_virt_xgmi_migrate_enabled(adev))
2367 		psp_update_gpu_addresses(adev);
2368 
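	/* On bare metal, step through the PSP bootloader stages in order (KDB, SPL, SYS_DRV,
	 * SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV, IPKEYMGR_DRV, SPDM_DRV and finally SOS); each
	 * stage is loaded only if its firmware is valid and the ASIC implements the loader.
	 */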
2369 	if (!amdgpu_sriov_vf(adev)) {
2370 		if ((is_psp_fw_valid(psp->kdb)) &&
2371 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2372 			ret = psp_bootloader_load_kdb(psp);
2373 			if (ret) {
2374 				dev_err(adev->dev, "PSP load kdb failed!\n");
2375 				return ret;
2376 			}
2377 		}
2378 
2379 		if ((is_psp_fw_valid(psp->spl)) &&
2380 		    (psp->funcs->bootloader_load_spl != NULL)) {
2381 			ret = psp_bootloader_load_spl(psp);
2382 			if (ret) {
2383 				dev_err(adev->dev, "PSP load spl failed!\n");
2384 				return ret;
2385 			}
2386 		}
2387 
2388 		if ((is_psp_fw_valid(psp->sys)) &&
2389 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2390 			ret = psp_bootloader_load_sysdrv(psp);
2391 			if (ret) {
2392 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2393 				return ret;
2394 			}
2395 		}
2396 
2397 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2398 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2399 			ret = psp_bootloader_load_soc_drv(psp);
2400 			if (ret) {
2401 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2402 				return ret;
2403 			}
2404 		}
2405 
2406 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2407 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2408 			ret = psp_bootloader_load_intf_drv(psp);
2409 			if (ret) {
2410 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2411 				return ret;
2412 			}
2413 		}
2414 
2415 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2416 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2417 			ret = psp_bootloader_load_dbg_drv(psp);
2418 			if (ret) {
2419 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2420 				return ret;
2421 			}
2422 		}
2423 
2424 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2425 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2426 			ret = psp_bootloader_load_ras_drv(psp);
2427 			if (ret) {
2428 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2429 				return ret;
2430 			}
2431 		}
2432 
2433 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2434 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2435 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2436 			if (ret) {
2437 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2438 				return ret;
2439 			}
2440 		}
2441 
2442 		if ((is_psp_fw_valid(psp->spdm_drv)) &&
2443 		    (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2444 			ret = psp_bootloader_load_spdm_drv(psp);
2445 			if (ret) {
2446 				dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2447 				return ret;
2448 			}
2449 		}
2450 
2451 		if ((is_psp_fw_valid(psp->sos)) &&
2452 		    (psp->funcs->bootloader_load_sos != NULL)) {
2453 			ret = psp_bootloader_load_sos(psp);
2454 			if (ret) {
2455 				dev_err(adev->dev, "PSP load sos failed!\n");
2456 				return ret;
2457 			}
2458 		}
2459 	}
2460 
2461 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2462 	if (ret) {
2463 		dev_err(adev->dev, "PSP create ring failed!\n");
2464 		return ret;
2465 	}
2466 
2467 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2468 		goto skip_pin_bo;
2469 
2470 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2471 		ret = psp_tmr_init(psp);
2472 		if (ret) {
2473 			dev_err(adev->dev, "PSP tmr init failed!\n");
2474 			return ret;
2475 		}
2476 	}
2477 
2478 skip_pin_bo:
2479 	/*
2480 	 * For ASICs with DF Cstate management centralized
2481 	 * to PMFW, TMR setup should be performed after PMFW
2482 	 * is loaded and before other non-PSP firmware is loaded.
2483 	 */
2484 	if (psp->pmfw_centralized_cstate_management) {
2485 		ret = psp_load_smu_fw(psp);
2486 		if (ret)
2487 			return ret;
2488 	}
2489 
2490 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2491 		ret = psp_tmr_load(psp);
2492 		if (ret) {
2493 			dev_err(adev->dev, "PSP load tmr failed!\n");
2494 			return ret;
2495 		}
2496 	}
2497 
2498 	return 0;
2499 }
2500 
2501 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2502 			   enum psp_gfx_fw_type *type)
2503 {
2504 	switch (ucode->ucode_id) {
2505 	case AMDGPU_UCODE_ID_CAP:
2506 		*type = GFX_FW_TYPE_CAP;
2507 		break;
2508 	case AMDGPU_UCODE_ID_SDMA0:
2509 		*type = GFX_FW_TYPE_SDMA0;
2510 		break;
2511 	case AMDGPU_UCODE_ID_SDMA1:
2512 		*type = GFX_FW_TYPE_SDMA1;
2513 		break;
2514 	case AMDGPU_UCODE_ID_SDMA2:
2515 		*type = GFX_FW_TYPE_SDMA2;
2516 		break;
2517 	case AMDGPU_UCODE_ID_SDMA3:
2518 		*type = GFX_FW_TYPE_SDMA3;
2519 		break;
2520 	case AMDGPU_UCODE_ID_SDMA4:
2521 		*type = GFX_FW_TYPE_SDMA4;
2522 		break;
2523 	case AMDGPU_UCODE_ID_SDMA5:
2524 		*type = GFX_FW_TYPE_SDMA5;
2525 		break;
2526 	case AMDGPU_UCODE_ID_SDMA6:
2527 		*type = GFX_FW_TYPE_SDMA6;
2528 		break;
2529 	case AMDGPU_UCODE_ID_SDMA7:
2530 		*type = GFX_FW_TYPE_SDMA7;
2531 		break;
2532 	case AMDGPU_UCODE_ID_CP_MES:
2533 		*type = GFX_FW_TYPE_CP_MES;
2534 		break;
2535 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2536 		*type = GFX_FW_TYPE_MES_STACK;
2537 		break;
2538 	case AMDGPU_UCODE_ID_CP_MES1:
2539 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2540 		break;
2541 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2542 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2543 		break;
2544 	case AMDGPU_UCODE_ID_CP_CE:
2545 		*type = GFX_FW_TYPE_CP_CE;
2546 		break;
2547 	case AMDGPU_UCODE_ID_CP_PFP:
2548 		*type = GFX_FW_TYPE_CP_PFP;
2549 		break;
2550 	case AMDGPU_UCODE_ID_CP_ME:
2551 		*type = GFX_FW_TYPE_CP_ME;
2552 		break;
2553 	case AMDGPU_UCODE_ID_CP_MEC1:
2554 		*type = GFX_FW_TYPE_CP_MEC;
2555 		break;
2556 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2557 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2558 		break;
2559 	case AMDGPU_UCODE_ID_CP_MEC2:
2560 		*type = GFX_FW_TYPE_CP_MEC;
2561 		break;
2562 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2563 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2564 		break;
2565 	case AMDGPU_UCODE_ID_RLC_P:
2566 		*type = GFX_FW_TYPE_RLC_P;
2567 		break;
2568 	case AMDGPU_UCODE_ID_RLC_V:
2569 		*type = GFX_FW_TYPE_RLC_V;
2570 		break;
2571 	case AMDGPU_UCODE_ID_RLC_G:
2572 		*type = GFX_FW_TYPE_RLC_G;
2573 		break;
2574 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2575 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2576 		break;
2577 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2578 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2579 		break;
2580 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2581 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2582 		break;
2583 	case AMDGPU_UCODE_ID_RLC_IRAM:
2584 		*type = GFX_FW_TYPE_RLC_IRAM;
2585 		break;
2586 	case AMDGPU_UCODE_ID_RLC_DRAM:
2587 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2588 		break;
2589 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2590 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2591 		break;
2592 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2593 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2594 		break;
2595 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2596 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2597 		break;
2598 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2599 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2600 		break;
2601 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2602 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2603 		break;
2604 	case AMDGPU_UCODE_ID_SMC:
2605 		*type = GFX_FW_TYPE_SMU;
2606 		break;
2607 	case AMDGPU_UCODE_ID_PPTABLE:
2608 		*type = GFX_FW_TYPE_PPTABLE;
2609 		break;
2610 	case AMDGPU_UCODE_ID_UVD:
2611 		*type = GFX_FW_TYPE_UVD;
2612 		break;
2613 	case AMDGPU_UCODE_ID_UVD1:
2614 		*type = GFX_FW_TYPE_UVD1;
2615 		break;
2616 	case AMDGPU_UCODE_ID_VCE:
2617 		*type = GFX_FW_TYPE_VCE;
2618 		break;
2619 	case AMDGPU_UCODE_ID_VCN:
2620 		*type = GFX_FW_TYPE_VCN;
2621 		break;
2622 	case AMDGPU_UCODE_ID_VCN1:
2623 		*type = GFX_FW_TYPE_VCN1;
2624 		break;
2625 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2626 		*type = GFX_FW_TYPE_DMCU_ERAM;
2627 		break;
2628 	case AMDGPU_UCODE_ID_DMCU_INTV:
2629 		*type = GFX_FW_TYPE_DMCU_ISR;
2630 		break;
2631 	case AMDGPU_UCODE_ID_VCN0_RAM:
2632 		*type = GFX_FW_TYPE_VCN0_RAM;
2633 		break;
2634 	case AMDGPU_UCODE_ID_VCN1_RAM:
2635 		*type = GFX_FW_TYPE_VCN1_RAM;
2636 		break;
2637 	case AMDGPU_UCODE_ID_DMCUB:
2638 		*type = GFX_FW_TYPE_DMUB;
2639 		break;
2640 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2641 	case AMDGPU_UCODE_ID_SDMA_RS64:
2642 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2643 		break;
2644 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2645 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2646 		break;
2647 	case AMDGPU_UCODE_ID_IMU_I:
2648 		*type = GFX_FW_TYPE_IMU_I;
2649 		break;
2650 	case AMDGPU_UCODE_ID_IMU_D:
2651 		*type = GFX_FW_TYPE_IMU_D;
2652 		break;
2653 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2654 		*type = GFX_FW_TYPE_RS64_PFP;
2655 		break;
2656 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2657 		*type = GFX_FW_TYPE_RS64_ME;
2658 		break;
2659 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2660 		*type = GFX_FW_TYPE_RS64_MEC;
2661 		break;
2662 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2663 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2664 		break;
2665 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2666 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2667 		break;
2668 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2669 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2670 		break;
2671 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2672 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2673 		break;
2674 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2675 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2676 		break;
2677 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2678 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2679 		break;
2680 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2681 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2682 		break;
2683 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2684 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2685 		break;
2686 	case AMDGPU_UCODE_ID_VPE_CTX:
2687 		*type = GFX_FW_TYPE_VPEC_FW1;
2688 		break;
2689 	case AMDGPU_UCODE_ID_VPE_CTL:
2690 		*type = GFX_FW_TYPE_VPEC_FW2;
2691 		break;
2692 	case AMDGPU_UCODE_ID_VPE:
2693 		*type = GFX_FW_TYPE_VPE;
2694 		break;
2695 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2696 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2697 		break;
2698 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2699 		*type = GFX_FW_TYPE_UMSCH_DATA;
2700 		break;
2701 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2702 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2703 		break;
2704 	case AMDGPU_UCODE_ID_P2S_TABLE:
2705 		*type = GFX_FW_TYPE_P2S_TABLE;
2706 		break;
2707 	case AMDGPU_UCODE_ID_JPEG_RAM:
2708 		*type = GFX_FW_TYPE_JPEG_RAM;
2709 		break;
2710 	case AMDGPU_UCODE_ID_ISP:
2711 		*type = GFX_FW_TYPE_ISP;
2712 		break;
2713 	case AMDGPU_UCODE_ID_MAXIMUM:
2714 	default:
2715 		return -EINVAL;
2716 	}
2717 
2718 	return 0;
2719 }
2720 
2721 static void psp_print_fw_hdr(struct psp_context *psp,
2722 			     struct amdgpu_firmware_info *ucode)
2723 {
2724 	struct amdgpu_device *adev = psp->adev;
2725 	struct common_firmware_header *hdr;
2726 
2727 	switch (ucode->ucode_id) {
2728 	case AMDGPU_UCODE_ID_SDMA0:
2729 	case AMDGPU_UCODE_ID_SDMA1:
2730 	case AMDGPU_UCODE_ID_SDMA2:
2731 	case AMDGPU_UCODE_ID_SDMA3:
2732 	case AMDGPU_UCODE_ID_SDMA4:
2733 	case AMDGPU_UCODE_ID_SDMA5:
2734 	case AMDGPU_UCODE_ID_SDMA6:
2735 	case AMDGPU_UCODE_ID_SDMA7:
2736 		hdr = (struct common_firmware_header *)
2737 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2738 		amdgpu_ucode_print_sdma_hdr(hdr);
2739 		break;
2740 	case AMDGPU_UCODE_ID_CP_CE:
2741 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2742 		amdgpu_ucode_print_gfx_hdr(hdr);
2743 		break;
2744 	case AMDGPU_UCODE_ID_CP_PFP:
2745 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2746 		amdgpu_ucode_print_gfx_hdr(hdr);
2747 		break;
2748 	case AMDGPU_UCODE_ID_CP_ME:
2749 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2750 		amdgpu_ucode_print_gfx_hdr(hdr);
2751 		break;
2752 	case AMDGPU_UCODE_ID_CP_MEC1:
2753 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2754 		amdgpu_ucode_print_gfx_hdr(hdr);
2755 		break;
2756 	case AMDGPU_UCODE_ID_RLC_G:
2757 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2758 		amdgpu_ucode_print_rlc_hdr(hdr);
2759 		break;
2760 	case AMDGPU_UCODE_ID_SMC:
2761 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2762 		amdgpu_ucode_print_smc_hdr(hdr);
2763 		break;
2764 	default:
2765 		break;
2766 	}
2767 }
2768 
2769 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2770 				       struct amdgpu_firmware_info *ucode,
2771 				       struct psp_gfx_cmd_resp *cmd)
2772 {
2773 	int ret;
2774 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2775 
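	/* Build a LOAD_IP_FW command pointing PSP at the firmware's GPU address and size */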
2776 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2777 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2778 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2779 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2780 
2781 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2782 	if (ret)
2783 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2784 
2785 	return ret;
2786 }
2787 
2788 int psp_execute_ip_fw_load(struct psp_context *psp,
2789 			   struct amdgpu_firmware_info *ucode)
2790 {
2791 	int ret = 0;
2792 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2793 
2794 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2795 	if (!ret) {
2796 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2797 					 psp->fence_buf_mc_addr);
2798 	}
2799 
2800 	release_psp_cmd_buf(psp);
2801 
2802 	return ret;
2803 }
2804 
2805 static int psp_load_p2s_table(struct psp_context *psp)
2806 {
2807 	int ret;
2808 	struct amdgpu_device *adev = psp->adev;
2809 	struct amdgpu_firmware_info *ucode =
2810 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2811 
2812 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2813 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2814 		return 0;
2815 
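	/* PSP 13.0.6 and 13.0.14 require a minimum SOS firmware version (different for
	 * APU and dGPU) before the P2S table can be loaded; otherwise skip the load.
	 */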
2816 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2817 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2818 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2819 								0x0036003C;
2820 		if (psp->sos.fw_version < supp_vers)
2821 			return 0;
2822 	}
2823 
2824 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2825 		return 0;
2826 
2827 	ret = psp_execute_ip_fw_load(psp, ucode);
2828 
2829 	return ret;
2830 }
2831 
2832 static int psp_load_smu_fw(struct psp_context *psp)
2833 {
2834 	int ret;
2835 	struct amdgpu_device *adev = psp->adev;
2836 	struct amdgpu_firmware_info *ucode =
2837 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2838 	struct amdgpu_ras *ras = psp->ras_context.ras;
2839 
2840 	/*
2841 	 * Skip SMU FW reloading when runtime PM uses BACO/BAMACO only,
2842 	 * as the SMU stays alive in that case.
2843 	 */
2844 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2845 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2846 		return 0;
2847 
2848 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2849 		return 0;
2850 
2851 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2852 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2853 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2854 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2855 		if (ret)
2856 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2857 	}
2858 
2859 	ret = psp_execute_ip_fw_load(psp, ucode);
2860 
2861 	if (ret)
2862 		dev_err(adev->dev, "PSP load smu failed!\n");
2863 
2864 	return ret;
2865 }
2866 
2867 static bool fw_load_skip_check(struct psp_context *psp,
2868 			       struct amdgpu_firmware_info *ucode)
2869 {
2870 	if (!ucode->fw || !ucode->ucode_size)
2871 		return true;
2872 
2873 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2874 		return true;
2875 
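	/* SMU firmware is either loaded separately through psp_load_smu_fw() or intentionally not reloaded */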
2876 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2877 	    (psp_smu_reload_quirk(psp) ||
2878 	     psp->autoload_supported ||
2879 	     psp->pmfw_centralized_cstate_management))
2880 		return true;
2881 
2882 	if (amdgpu_sriov_vf(psp->adev) &&
2883 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2884 		return true;
2885 
2886 	if (psp->autoload_supported &&
2887 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2888 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2889 		/* skip mec JT when autoload is enabled */
2890 		return true;
2891 
2892 	return false;
2893 }
2894 
2895 int psp_load_fw_list(struct psp_context *psp,
2896 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
2897 {
2898 	int ret = 0, i;
2899 	struct amdgpu_firmware_info *ucode;
2900 
2901 	for (i = 0; i < ucode_count; ++i) {
2902 		ucode = ucode_list[i];
2903 		psp_print_fw_hdr(psp, ucode);
2904 		ret = psp_execute_ip_fw_load(psp, ucode);
2905 		if (ret)
2906 			return ret;
2907 	}
2908 	return ret;
2909 }
2910 
2911 static int psp_load_non_psp_fw(struct psp_context *psp)
2912 {
2913 	int i, ret;
2914 	struct amdgpu_firmware_info *ucode;
2915 	struct amdgpu_device *adev = psp->adev;
2916 
2917 	if (psp->autoload_supported &&
2918 	    !psp->pmfw_centralized_cstate_management) {
2919 		ret = psp_load_smu_fw(psp);
2920 		if (ret)
2921 			return ret;
2922 	}
2923 
2924 	/* Load P2S table first if it's available */
2925 	psp_load_p2s_table(psp);
2926 
2927 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
2928 		ucode = &adev->firmware.ucode[i];
2929 
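		/* SMU firmware that is not skipped goes through its dedicated load path */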
2930 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2931 		    !fw_load_skip_check(psp, ucode)) {
2932 			ret = psp_load_smu_fw(psp);
2933 			if (ret)
2934 				return ret;
2935 			continue;
2936 		}
2937 
2938 		if (fw_load_skip_check(psp, ucode))
2939 			continue;
2940 
2941 		if (psp->autoload_supported &&
2942 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2943 			     IP_VERSION(11, 0, 7) ||
2944 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2945 			     IP_VERSION(11, 0, 11) ||
2946 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2947 			     IP_VERSION(11, 0, 12)) &&
2948 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2949 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2950 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2951 			/* PSP only receives one SDMA fw for sienna_cichlid,
2952 			 * as all four SDMA firmware images are the same
2953 			 */
2954 			continue;
2955 
2956 		psp_print_fw_hdr(psp, ucode);
2957 
2958 		ret = psp_execute_ip_fw_load(psp, ucode);
2959 		if (ret)
2960 			return ret;
2961 
2962 		/* Start RLC autoload after PSP has received all the GFX firmware */
2963 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2964 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2965 			ret = psp_rlc_autoload_start(psp);
2966 			if (ret) {
2967 				dev_err(adev->dev, "Failed to start rlc autoload\n");
2968 				return ret;
2969 			}
2970 		}
2971 	}
2972 
2973 	return 0;
2974 }
2975 
2976 static int psp_load_fw(struct amdgpu_device *adev)
2977 {
2978 	int ret;
2979 	struct psp_context *psp = &adev->psp;
2980 
2981 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2982 		/* should not destroy ring, only stop */
2983 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
2984 	} else {
2985 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2986 
2987 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2988 		if (ret) {
2989 			dev_err(adev->dev, "PSP ring init failed!\n");
2990 			goto failed;
2991 		}
2992 	}
2993 
2994 	ret = psp_hw_start(psp);
2995 	if (ret)
2996 		goto failed;
2997 
2998 	ret = psp_load_non_psp_fw(psp);
2999 	if (ret)
3000 		goto failed1;
3001 
3002 	ret = psp_asd_initialize(psp);
3003 	if (ret) {
3004 		dev_err(adev->dev, "PSP load asd failed!\n");
3005 		goto failed1;
3006 	}
3007 
3008 	ret = psp_rl_load(adev);
3009 	if (ret) {
3010 		dev_err(adev->dev, "PSP load RL failed!\n");
3011 		goto failed1;
3012 	}
3013 
3014 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3015 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
3016 			ret = psp_xgmi_initialize(psp, false, true);
3017 			/* Warn on XGMI session initialization failure
3018 			 * instead of stopping driver initialization
3019 			 */
3020 			if (ret)
3021 				dev_err(psp->adev->dev,
3022 					"XGMI: Failed to initialize XGMI session\n");
3023 		}
3024 	}
3025 
3026 	if (psp->ta_fw) {
3027 		ret = psp_ras_initialize(psp);
3028 		if (ret)
3029 			dev_err(psp->adev->dev,
3030 				"RAS: Failed to initialize RAS\n");
3031 
3032 		ret = psp_hdcp_initialize(psp);
3033 		if (ret)
3034 			dev_err(psp->adev->dev,
3035 				"HDCP: Failed to initialize HDCP\n");
3036 
3037 		ret = psp_dtm_initialize(psp);
3038 		if (ret)
3039 			dev_err(psp->adev->dev,
3040 				"DTM: Failed to initialize DTM\n");
3041 
3042 		ret = psp_rap_initialize(psp);
3043 		if (ret)
3044 			dev_err(psp->adev->dev,
3045 				"RAP: Failed to initialize RAP\n");
3046 
3047 		ret = psp_securedisplay_initialize(psp);
3048 		if (ret)
3049 			dev_err(psp->adev->dev,
3050 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3051 	}
3052 
3053 	return 0;
3054 
3055 failed1:
3056 	psp_free_shared_bufs(psp);
3057 failed:
3058 	/*
3059 	 * all cleanup jobs (xgmi terminate, ras terminate,
3060 	 * ring destroy, cmd/fence/fw buffer destroy,
3061 	 * psp->cmd destroy) are deferred to psp_hw_fini
3062 	 */
3063 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3064 	return ret;
3065 }
3066 
3067 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3068 {
3069 	int ret;
3070 	struct amdgpu_device *adev = ip_block->adev;
3071 
3072 	mutex_lock(&adev->firmware.mutex);
3073 
3074 	ret = amdgpu_ucode_init_bo(adev);
3075 	if (ret)
3076 		goto failed;
3077 
3078 	ret = psp_load_fw(adev);
3079 	if (ret) {
3080 		dev_err(adev->dev, "PSP firmware loading failed\n");
3081 		goto failed;
3082 	}
3083 
3084 	mutex_unlock(&adev->firmware.mutex);
3085 	return 0;
3086 
3087 failed:
3088 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3089 	mutex_unlock(&adev->firmware.mutex);
3090 	return -EINVAL;
3091 }
3092 
3093 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3094 {
3095 	struct amdgpu_device *adev = ip_block->adev;
3096 	struct psp_context *psp = &adev->psp;
3097 
3098 	if (psp->ta_fw) {
3099 		psp_ras_terminate(psp);
3100 		psp_securedisplay_terminate(psp);
3101 		psp_rap_terminate(psp);
3102 		psp_dtm_terminate(psp);
3103 		psp_hdcp_terminate(psp);
3104 
3105 		if (adev->gmc.xgmi.num_physical_nodes > 1)
3106 			psp_xgmi_terminate(psp);
3107 	}
3108 
3109 	psp_asd_terminate(psp);
3110 	psp_tmr_terminate(psp);
3111 
3112 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3113 
3114 	return 0;
3115 }
3116 
3117 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3118 {
3119 	int ret = 0;
3120 	struct amdgpu_device *adev = ip_block->adev;
3121 	struct psp_context *psp = &adev->psp;
3122 
3123 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3124 	    psp->xgmi_context.context.initialized) {
3125 		ret = psp_xgmi_terminate(psp);
3126 		if (ret) {
3127 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3128 			goto out;
3129 		}
3130 	}
3131 
3132 	if (psp->ta_fw) {
3133 		ret = psp_ras_terminate(psp);
3134 		if (ret) {
3135 			dev_err(adev->dev, "Failed to terminate ras ta\n");
3136 			goto out;
3137 		}
3138 		ret = psp_hdcp_terminate(psp);
3139 		if (ret) {
3140 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3141 			goto out;
3142 		}
3143 		ret = psp_dtm_terminate(psp);
3144 		if (ret) {
3145 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3146 			goto out;
3147 		}
3148 		ret = psp_rap_terminate(psp);
3149 		if (ret) {
3150 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3151 			goto out;
3152 		}
3153 		ret = psp_securedisplay_terminate(psp);
3154 		if (ret) {
3155 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3156 			goto out;
3157 		}
3158 	}
3159 
3160 	ret = psp_asd_terminate(psp);
3161 	if (ret) {
3162 		dev_err(adev->dev, "Failed to terminate asd\n");
3163 		goto out;
3164 	}
3165 
3166 	ret = psp_tmr_terminate(psp);
3167 	if (ret) {
3168 		dev_err(adev->dev, "Failed to terminate tmr\n");
3169 		goto out;
3170 	}
3171 
3172 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3173 	if (ret)
3174 		dev_err(adev->dev, "PSP ring stop failed\n");
3175 
3176 out:
3177 	return ret;
3178 }
3179 
3180 static int psp_resume(struct amdgpu_ip_block *ip_block)
3181 {
3182 	int ret;
3183 	struct amdgpu_device *adev = ip_block->adev;
3184 	struct psp_context *psp = &adev->psp;
3185 
3186 	dev_info(adev->dev, "PSP is resuming...\n");
3187 
3188 	if (psp->mem_train_ctx.enable_mem_training) {
3189 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3190 		if (ret) {
3191 			dev_err(adev->dev, "Failed to process memory training!\n");
3192 			return ret;
3193 		}
3194 	}
3195 
3196 	mutex_lock(&adev->firmware.mutex);
3197 
3198 	ret = amdgpu_ucode_init_bo(adev);
3199 	if (ret)
3200 		goto failed;
3201 
3202 	ret = psp_hw_start(psp);
3203 	if (ret)
3204 		goto failed;
3205 
3206 	ret = psp_load_non_psp_fw(psp);
3207 	if (ret)
3208 		goto failed;
3209 
3210 	ret = psp_asd_initialize(psp);
3211 	if (ret) {
3212 		dev_err(adev->dev, "PSP load asd failed!\n");
3213 		goto failed;
3214 	}
3215 
3216 	ret = psp_rl_load(adev);
3217 	if (ret) {
3218 		dev_err(adev->dev, "PSP load RL failed!\n");
3219 		goto failed;
3220 	}
3221 
3222 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3223 		ret = psp_xgmi_initialize(psp, false, true);
3224 		/* Warn on XGMI session initialization failure
3225 		 * instead of stopping driver initialization
3226 		 */
3227 		if (ret)
3228 			dev_err(psp->adev->dev,
3229 				"XGMI: Failed to initialize XGMI session\n");
3230 	}
3231 
3232 	if (psp->ta_fw) {
3233 		ret = psp_ras_initialize(psp);
3234 		if (ret)
3235 			dev_err(psp->adev->dev,
3236 				"RAS: Failed to initialize RAS\n");
3237 
3238 		ret = psp_hdcp_initialize(psp);
3239 		if (ret)
3240 			dev_err(psp->adev->dev,
3241 				"HDCP: Failed to initialize HDCP\n");
3242 
3243 		ret = psp_dtm_initialize(psp);
3244 		if (ret)
3245 			dev_err(psp->adev->dev,
3246 				"DTM: Failed to initialize DTM\n");
3247 
3248 		ret = psp_rap_initialize(psp);
3249 		if (ret)
3250 			dev_err(psp->adev->dev,
3251 				"RAP: Failed to initialize RAP\n");
3252 
3253 		ret = psp_securedisplay_initialize(psp);
3254 		if (ret)
3255 			dev_err(psp->adev->dev,
3256 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3257 	}
3258 
3259 	mutex_unlock(&adev->firmware.mutex);
3260 
3261 	return 0;
3262 
3263 failed:
3264 	dev_err(adev->dev, "PSP resume failed\n");
3265 	mutex_unlock(&adev->firmware.mutex);
3266 	return ret;
3267 }
3268 
3269 int psp_gpu_reset(struct amdgpu_device *adev)
3270 {
3271 	int ret;
3272 
3273 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3274 		return 0;
3275 
3276 	mutex_lock(&adev->psp.mutex);
3277 	ret = psp_mode1_reset(&adev->psp);
3278 	mutex_unlock(&adev->psp.mutex);
3279 
3280 	return ret;
3281 }
3282 
3283 int psp_rlc_autoload_start(struct psp_context *psp)
3284 {
3285 	int ret;
3286 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3287 
3288 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3289 
3290 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3291 				 psp->fence_buf_mc_addr);
3292 
3293 	release_psp_cmd_buf(psp);
3294 
3295 	return ret;
3296 }
3297 
3298 int psp_ring_cmd_submit(struct psp_context *psp,
3299 			uint64_t cmd_buf_mc_addr,
3300 			uint64_t fence_mc_addr,
3301 			int index)
3302 {
3303 	unsigned int psp_write_ptr_reg = 0;
3304 	struct psp_gfx_rb_frame *write_frame;
3305 	struct psp_ring *ring = &psp->km_ring;
3306 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3307 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3308 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3309 	struct amdgpu_device *adev = psp->adev;
3310 	uint32_t ring_size_dw = ring->ring_size / 4;
3311 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3312 
3313 	/* KM (GPCOM) prepare write pointer */
3314 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3315 
3316 	/* Update KM RB frame pointer to new frame */
3317 	/* write_frame ptr increments by size of rb_frame in bytes */
3318 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3319 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3320 		write_frame = ring_buffer_start;
3321 	else
3322 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3323 	/* Check for an invalid write_frame pointer address */
3324 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3325 		dev_err(adev->dev,
3326 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3327 			ring_buffer_start, ring_buffer_end, write_frame);
3328 		dev_err(adev->dev,
3329 			"write_frame is pointing to address out of bounds\n");
3330 		return -EINVAL;
3331 	}
3332 
3333 	/* Initialize KM RB frame */
3334 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3335 
3336 	/* Update KM RB frame */
3337 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3338 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3339 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3340 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3341 	write_frame->fence_value = index;
3342 	amdgpu_device_flush_hdp(adev, NULL);
3343 
3344 	/* Update the write Pointer in DWORDs */
3345 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3346 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3347 	return 0;
3348 }
3349 
3350 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3351 {
3352 	struct amdgpu_device *adev = psp->adev;
3353 	const struct psp_firmware_header_v1_0 *asd_hdr;
3354 	int err = 0;
3355 
3356 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3357 				   "amdgpu/%s_asd.bin", chip_name);
3358 	if (err)
3359 		goto out;
3360 
3361 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3362 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3363 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3364 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3365 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3366 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3367 	return 0;
3368 out:
3369 	amdgpu_ucode_release(&adev->psp.asd_fw);
3370 	return err;
3371 }
3372 
3373 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3374 {
3375 	struct amdgpu_device *adev = psp->adev;
3376 	const struct psp_firmware_header_v1_0 *toc_hdr;
3377 	int err = 0;
3378 
3379 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3380 				   "amdgpu/%s_toc.bin", chip_name);
3381 	if (err)
3382 		goto out;
3383 
3384 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3385 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3386 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3387 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3388 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3389 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3390 	return 0;
3391 out:
3392 	amdgpu_ucode_release(&adev->psp.toc_fw);
3393 	return err;
3394 }
3395 
3396 static int parse_sos_bin_descriptor(struct psp_context *psp,
3397 				   const struct psp_fw_bin_desc *desc,
3398 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3399 {
3400 	uint8_t *ucode_start_addr  = NULL;
3401 
3402 	if (!psp || !desc || !sos_hdr)
3403 		return -EINVAL;
3404 
3405 	ucode_start_addr  = (uint8_t *)sos_hdr +
3406 			    le32_to_cpu(desc->offset_bytes) +
3407 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3408 
3409 	switch (desc->fw_type) {
3410 	case PSP_FW_TYPE_PSP_SOS:
3411 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3412 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3413 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3414 		psp->sos.start_addr	   = ucode_start_addr;
3415 		break;
3416 	case PSP_FW_TYPE_PSP_SYS_DRV:
3417 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3418 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3419 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3420 		psp->sys.start_addr        = ucode_start_addr;
3421 		break;
3422 	case PSP_FW_TYPE_PSP_KDB:
3423 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3424 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3425 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3426 		psp->kdb.start_addr        = ucode_start_addr;
3427 		break;
3428 	case PSP_FW_TYPE_PSP_TOC:
3429 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3430 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3431 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3432 		psp->toc.start_addr        = ucode_start_addr;
3433 		break;
3434 	case PSP_FW_TYPE_PSP_SPL:
3435 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3436 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3437 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3438 		psp->spl.start_addr        = ucode_start_addr;
3439 		break;
3440 	case PSP_FW_TYPE_PSP_RL:
3441 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3442 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3443 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3444 		psp->rl.start_addr         = ucode_start_addr;
3445 		break;
3446 	case PSP_FW_TYPE_PSP_SOC_DRV:
3447 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3448 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3449 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3450 		psp->soc_drv.start_addr         = ucode_start_addr;
3451 		break;
3452 	case PSP_FW_TYPE_PSP_INTF_DRV:
3453 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3454 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3455 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3456 		psp->intf_drv.start_addr        = ucode_start_addr;
3457 		break;
3458 	case PSP_FW_TYPE_PSP_DBG_DRV:
3459 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3460 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3461 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3462 		psp->dbg_drv.start_addr         = ucode_start_addr;
3463 		break;
3464 	case PSP_FW_TYPE_PSP_RAS_DRV:
3465 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3466 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3467 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3468 		psp->ras_drv.start_addr         = ucode_start_addr;
3469 		break;
3470 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3471 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3472 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3473 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3474 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3475 		break;
3476 	case PSP_FW_TYPE_PSP_SPDM_DRV:
3477 		psp->spdm_drv.fw_version	= le32_to_cpu(desc->fw_version);
3478 		psp->spdm_drv.feature_version	= le32_to_cpu(desc->fw_version);
3479 		psp->spdm_drv.size_bytes	= le32_to_cpu(desc->size_bytes);
3480 		psp->spdm_drv.start_addr	= ucode_start_addr;
3481 		break;
3482 	default:
3483 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3484 		break;
3485 	}
3486 
3487 	return 0;
3488 }
3489 
3490 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3491 {
3492 	const struct psp_firmware_header_v1_0 *sos_hdr;
3493 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3494 	uint8_t *ucode_array_start_addr;
3495 
3496 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3497 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3498 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3499 
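	/* Use the standard SOS/SYS_DRV images unless this is a 13.0.2 part that is not
	 * CPU-connected, which loads the alternate (aux) images instead.
	 */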
3500 	if (adev->gmc.xgmi.connected_to_cpu ||
3501 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3502 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3503 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3504 
3505 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3506 		adev->psp.sys.start_addr = ucode_array_start_addr;
3507 
3508 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3509 		adev->psp.sos.start_addr = ucode_array_start_addr +
3510 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3511 	} else {
3512 		/* Load alternate PSP SOS FW */
3513 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3514 
3515 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3516 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3517 
3518 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3519 		adev->psp.sys.start_addr = ucode_array_start_addr +
3520 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3521 
3522 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3523 		adev->psp.sos.start_addr = ucode_array_start_addr +
3524 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3525 	}
3526 
3527 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3528 		dev_warn(adev->dev, "PSP SOS FW not available");
3529 		return -EINVAL;
3530 	}
3531 
3532 	return 0;
3533 }
3534 
3535 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3536 {
3537 	struct amdgpu_device *adev = psp->adev;
3538 	const struct psp_firmware_header_v1_0 *sos_hdr;
3539 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3540 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3541 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3542 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3543 	const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3544 	int fw_index, fw_bin_count, start_index = 0;
3545 	const struct psp_fw_bin_desc *fw_bin;
3546 	uint8_t *ucode_array_start_addr;
3547 	int err = 0;
3548 
3549 	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3550 				   "amdgpu/%s_sos.bin", chip_name);
3551 	if (err)
3552 		goto out;
3553 
3554 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3555 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3556 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3557 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3558 
3559 	switch (sos_hdr->header.header_version_major) {
3560 	case 1:
3561 		err = psp_init_sos_base_fw(adev);
3562 		if (err)
3563 			goto out;
3564 
3565 		if (sos_hdr->header.header_version_minor == 1) {
3566 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3567 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3568 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3569 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3570 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3571 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3572 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3573 		}
3574 		if (sos_hdr->header.header_version_minor == 2) {
3575 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3576 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3577 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3578 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3579 		}
3580 		if (sos_hdr->header.header_version_minor == 3) {
3581 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3582 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3583 			adev->psp.toc.start_addr = ucode_array_start_addr +
3584 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3585 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3586 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3587 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3588 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3589 			adev->psp.spl.start_addr = ucode_array_start_addr +
3590 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3591 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3592 			adev->psp.rl.start_addr = ucode_array_start_addr +
3593 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3594 		}
3595 		break;
3596 	case 2:
3597 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3598 
3599 		fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3600 
3601 		if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3602 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3603 			err = -EINVAL;
3604 			goto out;
3605 		}
3606 
3607 		if (sos_hdr_v2_0->header.header_version_minor == 1) {
3608 			sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3609 
3610 			fw_bin = sos_hdr_v2_1->psp_fw_bin;
3611 
3612 			if (psp_is_aux_sos_load_required(psp))
3613 				start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3614 			else
3615 				fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3616 
3617 		} else {
3618 			fw_bin = sos_hdr_v2_0->psp_fw_bin;
3619 		}
3620 
3621 		for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3622 			err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3623 						       sos_hdr_v2_0);
3624 			if (err)
3625 				goto out;
3626 		}
3627 		break;
3628 	default:
3629 		dev_err(adev->dev,
3630 			"unsupported psp sos firmware\n");
3631 		err = -EINVAL;
3632 		goto out;
3633 	}
3634 
3635 	return 0;
3636 out:
3637 	amdgpu_ucode_release(&adev->psp.sos_fw);
3638 
3639 	return err;
3640 }
3641 
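/*
 * On MP0 13.0.6 the TA binary may carry both a regular and an AUX XGMI TA:
 * APUs with TA firmware x.14 or newer use the AUX variant, everything else
 * uses the regular one. All other TA types are always applicable.
 */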
3642 static bool is_ta_fw_applicable(struct psp_context *psp,
3643 			     const struct psp_fw_bin_desc *desc)
3644 {
3645 	struct amdgpu_device *adev = psp->adev;
3646 	uint32_t fw_version;
3647 
3648 	switch (desc->fw_type) {
3649 	case TA_FW_TYPE_PSP_XGMI:
3650 	case TA_FW_TYPE_PSP_XGMI_AUX:
3651 		/* For now, the AUX TA only exists in the 13.0.6 TA binary,
3652 		 * starting from v20.00.0x.14.
3653 		 */
3654 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3655 		    IP_VERSION(13, 0, 6)) {
3656 			fw_version = le32_to_cpu(desc->fw_version);
3657 
3658 			if (adev->flags & AMD_IS_APU &&
3659 			    (fw_version & 0xff) >= 0x14)
3660 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3661 			else
3662 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3663 		}
3664 		break;
3665 	default:
3666 		break;
3667 	}
3668 
3669 	return true;
3670 }
3671 
3672 static int parse_ta_bin_descriptor(struct psp_context *psp,
3673 				   const struct psp_fw_bin_desc *desc,
3674 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3675 {
3676 	uint8_t *ucode_start_addr  = NULL;
3677 
3678 	if (!psp || !desc || !ta_hdr)
3679 		return -EINVAL;
3680 
3681 	if (!is_ta_fw_applicable(psp, desc))
3682 		return 0;
3683 
3684 	ucode_start_addr  = (uint8_t *)ta_hdr +
3685 			    le32_to_cpu(desc->offset_bytes) +
3686 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3687 
3688 	switch (desc->fw_type) {
3689 	case TA_FW_TYPE_PSP_ASD:
3690 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3691 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3692 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3693 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3694 		break;
3695 	case TA_FW_TYPE_PSP_XGMI:
3696 	case TA_FW_TYPE_PSP_XGMI_AUX:
3697 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3698 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3699 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3700 		break;
3701 	case TA_FW_TYPE_PSP_RAS:
3702 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3703 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3704 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3705 		break;
3706 	case TA_FW_TYPE_PSP_HDCP:
3707 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3708 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3709 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3710 		break;
3711 	case TA_FW_TYPE_PSP_DTM:
3712 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3713 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3714 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3715 		break;
3716 	case TA_FW_TYPE_PSP_RAP:
3717 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3718 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3719 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3720 		break;
3721 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3722 		psp->securedisplay_context.context.bin_desc.fw_version =
3723 			le32_to_cpu(desc->fw_version);
3724 		psp->securedisplay_context.context.bin_desc.size_bytes =
3725 			le32_to_cpu(desc->size_bytes);
3726 		psp->securedisplay_context.context.bin_desc.start_addr =
3727 			ucode_start_addr;
3728 		break;
3729 	default:
3730 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3731 		break;
3732 	}
3733 
3734 	return 0;
3735 }
3736 
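/*
 * v1 TA headers describe a fixed set of TAs (XGMI, RAS, HDCP, DTM and
 * SECUREDISPLAY). XGMI and HDCP start at the ucode array itself; RAS is
 * located at an offset from the XGMI TA, while DTM and SECUREDISPLAY are
 * located at offsets from the HDCP TA.
 */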
3737 static int parse_ta_v1_microcode(struct psp_context *psp)
3738 {
3739 	const struct ta_firmware_header_v1_0 *ta_hdr;
3740 	struct amdgpu_device *adev = psp->adev;
3741 
3742 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3743 
3744 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3745 		return -EINVAL;
3746 
3747 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3748 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3749 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3750 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3751 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3752 		(uint8_t *)ta_hdr +
3753 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3754 
3755 	adev->psp.ras_context.context.bin_desc.fw_version =
3756 		le32_to_cpu(ta_hdr->ras.fw_version);
3757 	adev->psp.ras_context.context.bin_desc.size_bytes =
3758 		le32_to_cpu(ta_hdr->ras.size_bytes);
3759 	adev->psp.ras_context.context.bin_desc.start_addr =
3760 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3761 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3762 
3763 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3764 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3765 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3766 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3767 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3768 		(uint8_t *)ta_hdr +
3769 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3770 
3771 	adev->psp.dtm_context.context.bin_desc.fw_version =
3772 		le32_to_cpu(ta_hdr->dtm.fw_version);
3773 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3774 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3775 	adev->psp.dtm_context.context.bin_desc.start_addr =
3776 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3777 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3778 
3779 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3780 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3781 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3782 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3783 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3784 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3785 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3786 
3787 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3788 
3789 	return 0;
3790 }
3791 
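/*
 * v2 TA headers pack a variable number of TA binaries, each described by a
 * psp_fw_bin descriptor; the descriptor count is bounded by
 * UCODE_MAX_PSP_PACKAGING.
 */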
3792 static int parse_ta_v2_microcode(struct psp_context *psp)
3793 {
3794 	const struct ta_firmware_header_v2_0 *ta_hdr;
3795 	struct amdgpu_device *adev = psp->adev;
3796 	int err = 0;
3797 	int ta_index = 0;
3798 
3799 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3800 
3801 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3802 		return -EINVAL;
3803 
3804 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3805 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3806 		return -EINVAL;
3807 	}
3808 
3809 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3810 		err = parse_ta_bin_descriptor(psp,
3811 					      &ta_hdr->ta_fw_bin[ta_index],
3812 					      ta_hdr);
3813 		if (err)
3814 			return err;
3815 	}
3816 
3817 	return 0;
3818 }
3819 
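/*
 * Request amdgpu/<chip>_ta.bin and dispatch to the v1 or v2 parser based on
 * the header's major version; the firmware is released again on failure.
 */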
3820 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3821 {
3822 	const struct common_firmware_header *hdr;
3823 	struct amdgpu_device *adev = psp->adev;
3824 	int err;
3825 
3826 	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3827 				   "amdgpu/%s_ta.bin", chip_name);
3828 	if (err)
3829 		return err;
3830 
3831 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3832 	switch (le16_to_cpu(hdr->header_version_major)) {
3833 	case 1:
3834 		err = parse_ta_v1_microcode(psp);
3835 		break;
3836 	case 2:
3837 		err = parse_ta_v2_microcode(psp);
3838 		break;
3839 	default:
3840 		dev_err(adev->dev, "unsupported TA header version\n");
3841 		err = -EINVAL;
3842 	}
3843 
3844 	if (err)
3845 		amdgpu_ucode_release(&adev->psp.ta_fw);
3846 
3847 	return err;
3848 }
3849 
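/*
 * CAP microcode is SRIOV-only and optional: a missing amdgpu/<chip>_cap.bin
 * only produces a warning and is not treated as an error.
 */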
3850 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3851 {
3852 	struct amdgpu_device *adev = psp->adev;
3853 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3854 	struct amdgpu_firmware_info *info = NULL;
3855 	int err = 0;
3856 
3857 	if (!amdgpu_sriov_vf(adev)) {
3858 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3859 		return -EINVAL;
3860 	}
3861 
3862 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
3863 				   "amdgpu/%s_cap.bin", chip_name);
3864 	if (err) {
3865 		if (err == -ENODEV) {
3866 			dev_warn(adev->dev, "cap microcode does not exist, skipping\n");
3867 			err = 0;
3868 		} else {
3869 			dev_err(adev->dev, "failed to initialize cap microcode\n");
3870 		}
3871 		goto out;
3872 	}
3873 
3874 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3875 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
3876 	info->fw = adev->psp.cap_fw;
3877 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3878 		adev->psp.cap_fw->data;
3879 	adev->firmware.fw_size += ALIGN(
3880 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3881 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3882 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3883 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3884 
3885 	return 0;
3886 
3887 out:
3888 	amdgpu_ucode_release(&adev->psp.cap_fw);
3889 	return err;
3890 }
3891 
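/*
 * Submit a GFX_CMD_ID_CONFIG_SQ_PERFMON command for the given XCP, selecting
 * which SQ override controls (core, register, perfmon) the PSP should enable.
 * This is a no-op under SRIOV and is only supported on MP0 13.0.6.
 */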
3892 int psp_config_sq_perfmon(struct psp_context *psp,
3893 		uint32_t xcp_id, bool core_override_enable,
3894 		bool reg_override_enable, bool perfmon_override_enable)
3895 {
3896 	struct psp_gfx_cmd_resp *cmd;
	int ret;
3897 
3898 	if (amdgpu_sriov_vf(psp->adev))
3899 		return 0;
3900 
3901 	if (xcp_id >= MAX_XCP) {
3902 		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
3903 		return -EINVAL;
3904 	}
3905 
3906 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
3907 		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
3908 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
3909 		return -EINVAL;
3910 	}

3911 	cmd = acquire_psp_cmd_buf(psp);
3912 
3913 	cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
3914 	cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
3915 	cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
3916 	cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
3917 	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
3918 
3919 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
3920 	if (ret)
3921 		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
3922 			xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
3923 
3924 	release_psp_cmd_buf(psp);
3925 	return ret;
3926 }
3927 
3928 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3929 					enum amd_clockgating_state state)
3930 {
3931 	return 0;
3932 }
3933 
3934 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
3935 				     enum amd_powergating_state state)
3936 {
3937 	return 0;
3938 }
3939 
3940 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3941 					 struct device_attribute *attr,
3942 					 char *buf)
3943 {
3944 	struct drm_device *ddev = dev_get_drvdata(dev);
3945 	struct amdgpu_device *adev = drm_to_adev(ddev);
3946 	struct amdgpu_ip_block *ip_block;
3947 	uint32_t fw_ver;
3948 	int ret;
3949 
3950 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
3951 	if (!ip_block || !ip_block->status.late_initialized) {
3952 		dev_info(adev->dev, "PSP block is not ready yet.\n");
3953 		return -EBUSY;
3954 	}
3955 
3956 	mutex_lock(&adev->psp.mutex);
3957 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3958 	mutex_unlock(&adev->psp.mutex);
3959 
3960 	if (ret) {
3961 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3962 		return ret;
3963 	}
3964 
3965 	return sysfs_emit(buf, "%x\n", fw_ver);
3966 }
3967 
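/*
 * Load a new USB-C PD firmware image: request the file named in the write
 * from the amdgpu/ firmware directory, copy it into a 1 MB aligned VRAM/GTT
 * buffer and hand that address to the PSP for flashing.
 */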
3968 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3969 						       struct device_attribute *attr,
3970 						       const char *buf,
3971 						       size_t count)
3972 {
3973 	struct drm_device *ddev = dev_get_drvdata(dev);
3974 	struct amdgpu_device *adev = drm_to_adev(ddev);
3975 	int ret, idx;
3976 	const struct firmware *usbc_pd_fw;
3977 	struct amdgpu_bo *fw_buf_bo = NULL;
3978 	uint64_t fw_pri_mc_addr;
3979 	void *fw_pri_cpu_addr;
3980 	struct amdgpu_ip_block *ip_block;
3981 
3982 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
3983 	if (!ip_block || !ip_block->status.late_initialized) {
3984 		dev_err(adev->dev, "PSP block is not ready yet.\n");
3985 		return -EBUSY;
3986 	}
3987 
3988 	if (!drm_dev_enter(ddev, &idx))
3989 		return -ENODEV;
3990 
3991 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
3992 				   "amdgpu/%s", buf);
3993 	if (ret)
3994 		goto fail;
3995 
3996 	/* Allocate an LFB buffer aligned to a 1 MB boundary, as required by the PSP */
3997 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3998 				      AMDGPU_GEM_DOMAIN_VRAM |
3999 				      AMDGPU_GEM_DOMAIN_GTT,
4000 				      &fw_buf_bo, &fw_pri_mc_addr,
4001 				      &fw_pri_cpu_addr);
4002 	if (ret)
4003 		goto rel_buf;
4004 
4005 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
4006 
4007 	mutex_lock(&adev->psp.mutex);
4008 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
4009 	mutex_unlock(&adev->psp.mutex);
4010 
4011 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4012 
4013 rel_buf:
4014 	amdgpu_ucode_release(&usbc_pd_fw);
4015 fail:
4016 	if (ret) {
4017 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
4018 		count = ret;
4019 	}
4020 
4021 	drm_dev_exit(idx);
4022 	return count;
4023 }
4024 
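/*
 * Copy a firmware image into the PSP private buffer, zeroing the full 1 MB
 * buffer first; guarded by drm_dev_enter() so nothing is written after the
 * device has been unplugged.
 */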
4025 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
4026 {
4027 	int idx;
4028 
4029 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
4030 		return;
4031 
4032 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
4033 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
4034 
4035 	drm_dev_exit(idx);
4036 }
4037 
4038 /**
4039  * DOC: usbc_pd_fw
4040  * Reading from this file retrieves the USB-C PD firmware version. Writing the name
4041  * of a firmware file in the amdgpu/ firmware directory triggers the update process.
4042  */
4043 static DEVICE_ATTR(usbc_pd_fw, 0644,
4044 		   psp_usbc_pd_fw_sysfs_read,
4045 		   psp_usbc_pd_fw_sysfs_write);
4046 
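/* A PSP firmware binary is considered present/valid if its descriptor has a non-zero size. */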
4047 int is_psp_fw_valid(struct psp_bin_desc bin)
4048 {
4049 	return bin.size_bytes;
4050 }
4051 
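/*
 * Stage IFWI data for flashing: successive writes are appended into a
 * kvmalloc'ed buffer of AMD_VBIOS_FILE_MAX_SIZE_B; the actual SPIROM update
 * is only triggered by a later read of the same file.
 */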
4052 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4053 					const struct bin_attribute *bin_attr,
4054 					char *buffer, loff_t pos, size_t count)
4055 {
4056 	struct device *dev = kobj_to_dev(kobj);
4057 	struct drm_device *ddev = dev_get_drvdata(dev);
4058 	struct amdgpu_device *adev = drm_to_adev(ddev);
4059 
4060 	adev->psp.vbflash_done = false;
4061 
4062 	/* Safeguard against memory drain */
4063 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
4064 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4065 		kvfree(adev->psp.vbflash_tmp_buf);
4066 		adev->psp.vbflash_tmp_buf = NULL;
4067 		adev->psp.vbflash_image_size = 0;
4068 		return -ENOMEM;
4069 	}
4070 
4071 	/* TODO Just allocate max for now and optimize to realloc later if needed */
4072 	if (!adev->psp.vbflash_tmp_buf) {
4073 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4074 		if (!adev->psp.vbflash_tmp_buf)
4075 			return -ENOMEM;
4076 	}
4077 
4078 	mutex_lock(&adev->psp.mutex);
4079 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4080 	adev->psp.vbflash_image_size += count;
4081 	mutex_unlock(&adev->psp.mutex);
4082 
4083 	dev_dbg(adev->dev, "IFWI staged for update\n");
4084 
4085 	return count;
4086 }
4087 
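/*
 * Trigger the actual flash: copy the staged image into a VRAM buffer, ask the
 * PSP to update the SPIROM from it, then free both the bounce buffer and the
 * staging buffer regardless of the outcome.
 */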
4088 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4089 				       const struct bin_attribute *bin_attr, char *buffer,
4090 				       loff_t pos, size_t count)
4091 {
4092 	struct device *dev = kobj_to_dev(kobj);
4093 	struct drm_device *ddev = dev_get_drvdata(dev);
4094 	struct amdgpu_device *adev = drm_to_adev(ddev);
4095 	struct amdgpu_bo *fw_buf_bo = NULL;
4096 	uint64_t fw_pri_mc_addr;
4097 	void *fw_pri_cpu_addr;
4098 	int ret;
4099 
4100 	if (adev->psp.vbflash_image_size == 0)
4101 		return -EINVAL;
4102 
4103 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4104 
4105 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4106 					AMDGPU_GPU_PAGE_SIZE,
4107 					AMDGPU_GEM_DOMAIN_VRAM,
4108 					&fw_buf_bo,
4109 					&fw_pri_mc_addr,
4110 					&fw_pri_cpu_addr);
4111 	if (ret)
4112 		goto rel_buf;
4113 
4114 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4115 
4116 	mutex_lock(&adev->psp.mutex);
4117 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4118 	mutex_unlock(&adev->psp.mutex);
4119 
4120 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4121 
4122 rel_buf:
4123 	kvfree(adev->psp.vbflash_tmp_buf);
4124 	adev->psp.vbflash_tmp_buf = NULL;
4125 	adev->psp.vbflash_image_size = 0;
4126 
4127 	if (ret) {
4128 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4129 		return ret;
4130 	}
4131 
4132 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4133 	return 0;
4134 }
4135 
4136 /**
4137  * DOC: psp_vbflash
4138  * Writing to this file will stage an IFWI for update. Reading from this file
4139  * will trigger the update process.
4140  */
4141 static const struct bin_attribute psp_vbflash_bin_attr = {
4142 	.attr = {.name = "psp_vbflash", .mode = 0660},
4143 	.size = 0,
4144 	.write_new = amdgpu_psp_vbflash_write,
4145 	.read_new = amdgpu_psp_vbflash_read,
4146 };
4147 
4148 /**
4149  * DOC: psp_vbflash_status
4150  * The status of the flash process.
4151  * 0: IFWI flash not complete.
4152  * 1: IFWI flash complete.
4153  */
4154 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4155 					 struct device_attribute *attr,
4156 					 char *buf)
4157 {
4158 	struct drm_device *ddev = dev_get_drvdata(dev);
4159 	struct amdgpu_device *adev = drm_to_adev(ddev);
4160 	uint32_t vbflash_status;
4161 
4162 	vbflash_status = psp_vbflash_status(&adev->psp);
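	/* Report 1 only once the staged flash has completed and bit 31 of the raw PSP status is clear. */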
4163 	if (!adev->psp.vbflash_done)
4164 		vbflash_status = 0;
4165 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4166 		vbflash_status = 1;
4167 
4168 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
4169 }
4170 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4171 
4172 static const struct bin_attribute *const bin_flash_attrs[] = {
4173 	&psp_vbflash_bin_attr,
4174 	NULL
4175 };
4176 
4177 static struct attribute *flash_attrs[] = {
4178 	&dev_attr_psp_vbflash_status.attr,
4179 	&dev_attr_usbc_pd_fw.attr,
4180 	NULL
4181 };
4182 
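/*
 * usbc_pd_fw is exposed read/write only when PD firmware update is supported;
 * psp_vbflash_status is exposed read-only only when IFWI update is supported.
 */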
4183 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4184 {
4185 	struct device *dev = kobj_to_dev(kobj);
4186 	struct drm_device *ddev = dev_get_drvdata(dev);
4187 	struct amdgpu_device *adev = drm_to_adev(ddev);
4188 
4189 	if (attr == &dev_attr_usbc_pd_fw.attr)
4190 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
4191 
4192 	return adev->psp.sup_ifwi_up ? 0440 : 0;
4193 }
4194 
4195 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4196 						const struct bin_attribute *attr,
4197 						int idx)
4198 {
4199 	struct device *dev = kobj_to_dev(kobj);
4200 	struct drm_device *ddev = dev_get_drvdata(dev);
4201 	struct amdgpu_device *adev = drm_to_adev(ddev);
4202 
4203 	return adev->psp.sup_ifwi_up ? 0660 : 0;
4204 }
4205 
4206 const struct attribute_group amdgpu_flash_attr_group = {
4207 	.attrs = flash_attrs,
4208 	.bin_attrs_new = bin_flash_attrs,
4209 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4210 	.is_visible = amdgpu_flash_attr_is_visible,
4211 };
4212 
4213 #if defined(CONFIG_DEBUG_FS)
4214 static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
4215 {
4216 	struct amdgpu_device *adev = filp->f_inode->i_private;
4217 	struct spirom_bo *bo_triplet;
4218 	int ret;
4219 
4220 	/* serialize the open() file calling */
4221 	/* serialize calls to open() */
4222 		return -EBUSY;
4223 
4224 	/*
4225 	 * Make sure only one userspace process has the dump open at a time so
4226 	 * that only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is
4227 	 * consumed, e.g. when one process tries to open the file while another
4228 	 * has already proceeded to read or release it. This also removes the
4229 	 * need for a mutex in the read() and release() callbacks.
4230 	 */
4231 	if (adev->psp.spirom_dump_trip) {
4232 		mutex_unlock(&adev->psp.mutex);
4233 		return -EBUSY;
4234 	}
4235 
4236 	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
4237 	if (!bo_triplet) {
4238 		mutex_unlock(&adev->psp.mutex);
4239 		return -ENOMEM;
4240 	}
4241 
4242 	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
4243 				      AMDGPU_GPU_PAGE_SIZE,
4244 				      AMDGPU_GEM_DOMAIN_GTT,
4245 				      &bo_triplet->bo,
4246 				      &bo_triplet->mc_addr,
4247 				      &bo_triplet->cpu_addr);
4248 	if (ret)
4249 		goto rel_trip;
4250 
4251 	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
4252 	if (ret)
4253 		goto rel_bo;
4254 
4255 	adev->psp.spirom_dump_trip = bo_triplet;
4256 	mutex_unlock(&adev->psp.mutex);
4257 	return 0;
4258 rel_bo:
4259 	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4260 			      &bo_triplet->cpu_addr);
4261 rel_trip:
4262 	kfree(bo_triplet);
4263 	mutex_unlock(&adev->psp.mutex);
4264 	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
4265 	return ret;
4266 }
4267 
4268 static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
4269 					    loff_t *pos)
4270 {
4271 	struct amdgpu_device *adev = filp->f_inode->i_private;
4272 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4273 
4274 	if (!bo_triplet)
4275 		return -EINVAL;
4276 
4277 	return simple_read_from_buffer(buf,
4278 				       size,
4279 				       pos, bo_triplet->cpu_addr,
4280 				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4281 }
4282 
4283 static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
4284 {
4285 	struct amdgpu_device *adev = filp->f_inode->i_private;
4286 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4287 
4288 	if (bo_triplet) {
4289 		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4290 				      &bo_triplet->cpu_addr);
4291 		kfree(bo_triplet);
4292 	}
4293 
4294 	adev->psp.spirom_dump_trip = NULL;
4295 	return 0;
4296 }
4297 
4298 static const struct file_operations psp_dump_spirom_debugfs_ops = {
4299 	.owner = THIS_MODULE,
4300 	.open = psp_read_spirom_debugfs_open,
4301 	.read = psp_read_spirom_debugfs_read,
4302 	.release = psp_read_spirom_debugfs_release,
4303 	.llseek = default_llseek,
4304 };
4305 #endif
4306 
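/*
 * Expose a read-only psp_spirom_dump debugfs file, sized to hold the full
 * SPIROM dump of AMD_VBIOS_FILE_MAX_SIZE_B * 2 bytes.
 */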
4307 void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
4308 {
4309 #if defined(CONFIG_DEBUG_FS)
4310 	struct drm_minor *minor = adev_to_drm(adev)->primary;
4311 
4312 	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
4313 				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4314 #endif
4315 }
4316 
4317 const struct amd_ip_funcs psp_ip_funcs = {
4318 	.name = "psp",
4319 	.early_init = psp_early_init,
4320 	.sw_init = psp_sw_init,
4321 	.sw_fini = psp_sw_fini,
4322 	.hw_init = psp_hw_init,
4323 	.hw_fini = psp_hw_fini,
4324 	.suspend = psp_suspend,
4325 	.resume = psp_resume,
4326 	.set_clockgating_state = psp_set_clockgating_state,
4327 	.set_powergating_state = psp_set_powergating_state,
4328 };
4329 
4330 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4331 	.type = AMD_IP_BLOCK_TYPE_PSP,
4332 	.major = 3,
4333 	.minor = 1,
4334 	.rev = 0,
4335 	.funcs = &psp_ip_funcs,
4336 };
4337 
4338 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4339 	.type = AMD_IP_BLOCK_TYPE_PSP,
4340 	.major = 10,
4341 	.minor = 0,
4342 	.rev = 0,
4343 	.funcs = &psp_ip_funcs,
4344 };
4345 
4346 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4347 	.type = AMD_IP_BLOCK_TYPE_PSP,
4348 	.major = 11,
4349 	.minor = 0,
4350 	.rev = 0,
4351 	.funcs = &psp_ip_funcs,
4352 };
4353 
4354 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4355 	.type = AMD_IP_BLOCK_TYPE_PSP,
4356 	.major = 11,
4357 	.minor = 0,
4358 	.rev = 8,
4359 	.funcs = &psp_ip_funcs,
4360 };
4361 
4362 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4363 	.type = AMD_IP_BLOCK_TYPE_PSP,
4364 	.major = 12,
4365 	.minor = 0,
4366 	.rev = 0,
4367 	.funcs = &psp_ip_funcs,
4368 };
4369 
4370 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4371 	.type = AMD_IP_BLOCK_TYPE_PSP,
4372 	.major = 13,
4373 	.minor = 0,
4374 	.rev = 0,
4375 	.funcs = &psp_ip_funcs,
4376 };
4377 
4378 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4379 	.type = AMD_IP_BLOCK_TYPE_PSP,
4380 	.major = 13,
4381 	.minor = 0,
4382 	.rev = 4,
4383 	.funcs = &psp_ip_funcs,
4384 };
4385 
4386 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4387 	.type = AMD_IP_BLOCK_TYPE_PSP,
4388 	.major = 14,
4389 	.minor = 0,
4390 	.rev = 0,
4391 	.funcs = &psp_ip_funcs,
4392 };
4393