xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision 009bfc5ec5c953534d0f528d1c1e4f60668b7371)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46 
47 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)
48 
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52 
53 static int psp_ring_init(struct psp_context *psp,
54 			 enum psp_ring_type ring_type)
55 {
56 	int ret = 0;
57 	struct psp_ring *ring;
58 	struct amdgpu_device *adev = psp->adev;
59 
60 	ring = &psp->km_ring;
61 
62 	ring->ring_type = ring_type;
63 
64 	/* allocate a 4K page of local frame buffer memory for the ring */
65 	ring->ring_size = 0x1000;
66 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 				      AMDGPU_GEM_DOMAIN_VRAM |
68 				      AMDGPU_GEM_DOMAIN_GTT,
69 				      &adev->firmware.rbuf,
70 				      &ring->ring_mem_mc_addr,
71 				      (void **)&ring->ring_mem);
72 	if (ret) {
73 		ring->ring_size = 0;
74 		return ret;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * With DF Cstate management centralized in the PMFW, the firmware
82  * loading sequence is updated as below:
83  *   - Load KDB
84  *   - Load SYS_DRV
85  *   - Load tOS
86  *   - Load PMFW
87  *   - Setup TMR
88  *   - Load other non-psp fw
89  *   - Load ASD
90  *   - Load XGMI/RAS/HDCP/DTM TA if any
91  *
92  * This new sequence is required for
93  *   - Arcturus and onwards
94  */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 	struct amdgpu_device *adev = psp->adev;
98 
99 	if (amdgpu_sriov_vf(adev)) {
100 		psp->pmfw_centralized_cstate_management = false;
101 		return;
102 	}
103 
104 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 	case IP_VERSION(11, 0, 0):
106 	case IP_VERSION(11, 0, 4):
107 	case IP_VERSION(11, 0, 5):
108 	case IP_VERSION(11, 0, 7):
109 	case IP_VERSION(11, 0, 9):
110 	case IP_VERSION(11, 0, 11):
111 	case IP_VERSION(11, 0, 12):
112 	case IP_VERSION(11, 0, 13):
113 	case IP_VERSION(13, 0, 0):
114 	case IP_VERSION(13, 0, 2):
115 	case IP_VERSION(13, 0, 7):
116 		psp->pmfw_centralized_cstate_management = true;
117 		break;
118 	default:
119 		psp->pmfw_centralized_cstate_management = false;
120 		break;
121 	}
122 }
123 
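/*
 * SRIOV path of psp_init_microcode(): only the CAP and/or TA microcode is
 * requested here, and adev->virt.autoload_ucode_id is selected per MP0 IP
 * version.
 */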
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 	struct amdgpu_device *adev = psp->adev;
127 	char ucode_prefix[30];
128 	int ret = 0;
129 
130 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131 
132 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 	case IP_VERSION(9, 0, 0):
134 	case IP_VERSION(11, 0, 7):
135 	case IP_VERSION(11, 0, 9):
136 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 		break;
139 	case IP_VERSION(13, 0, 2):
140 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 		ret = psp_init_cap_microcode(psp, ucode_prefix);
142 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 		break;
144 	case IP_VERSION(13, 0, 0):
145 		adev->virt.autoload_ucode_id = 0;
146 		break;
147 	case IP_VERSION(13, 0, 6):
148 	case IP_VERSION(13, 0, 14):
149 		ret = psp_init_cap_microcode(psp, ucode_prefix);
150 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 		break;
152 	case IP_VERSION(13, 0, 10):
153 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 		ret = psp_init_cap_microcode(psp, ucode_prefix);
155 		break;
156 	case IP_VERSION(13, 0, 12):
157 		ret = psp_init_ta_microcode(psp, ucode_prefix);
158 		break;
159 	default:
160 		return -EINVAL;
161 	}
162 	return ret;
163 }
164 
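/*
 * Select the per-ASIC psp callbacks based on the MP0 IP version, set the
 * autoload_supported/boot_time_tmr/sup_pd_fw_up/sup_ifwi_up flags, and
 * request the PSP microcode (SRIOV or bare-metal path).
 */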
165 static int psp_early_init(struct amdgpu_ip_block *ip_block)
166 {
167 	struct amdgpu_device *adev = ip_block->adev;
168 	struct psp_context *psp = &adev->psp;
169 
170 	psp->autoload_supported = true;
171 	psp->boot_time_tmr = true;
172 
173 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
174 	case IP_VERSION(9, 0, 0):
175 		psp_v3_1_set_psp_funcs(psp);
176 		psp->autoload_supported = false;
177 		psp->boot_time_tmr = false;
178 		break;
179 	case IP_VERSION(10, 0, 0):
180 	case IP_VERSION(10, 0, 1):
181 		psp_v10_0_set_psp_funcs(psp);
182 		psp->autoload_supported = false;
183 		psp->boot_time_tmr = false;
184 		break;
185 	case IP_VERSION(11, 0, 2):
186 	case IP_VERSION(11, 0, 4):
187 		psp_v11_0_set_psp_funcs(psp);
188 		psp->autoload_supported = false;
189 		psp->boot_time_tmr = false;
190 		break;
191 	case IP_VERSION(11, 0, 0):
192 	case IP_VERSION(11, 0, 7):
193 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
194 		fallthrough;
195 	case IP_VERSION(11, 0, 5):
196 	case IP_VERSION(11, 0, 9):
197 	case IP_VERSION(11, 0, 11):
198 	case IP_VERSION(11, 5, 0):
199 	case IP_VERSION(11, 5, 2):
200 	case IP_VERSION(11, 0, 12):
201 	case IP_VERSION(11, 0, 13):
202 		psp_v11_0_set_psp_funcs(psp);
203 		psp->boot_time_tmr = false;
204 		break;
205 	case IP_VERSION(11, 0, 3):
206 	case IP_VERSION(12, 0, 1):
207 		psp_v12_0_set_psp_funcs(psp);
208 		psp->autoload_supported = false;
209 		psp->boot_time_tmr = false;
210 		break;
211 	case IP_VERSION(13, 0, 2):
212 		psp->boot_time_tmr = false;
213 		fallthrough;
214 	case IP_VERSION(13, 0, 6):
215 	case IP_VERSION(13, 0, 14):
216 		psp_v13_0_set_psp_funcs(psp);
217 		psp->autoload_supported = false;
218 		break;
219 	case IP_VERSION(13, 0, 12):
220 		psp_v13_0_set_psp_funcs(psp);
221 		psp->autoload_supported = false;
222 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
223 		break;
224 	case IP_VERSION(13, 0, 1):
225 	case IP_VERSION(13, 0, 3):
226 	case IP_VERSION(13, 0, 5):
227 	case IP_VERSION(13, 0, 8):
228 	case IP_VERSION(13, 0, 11):
229 	case IP_VERSION(14, 0, 0):
230 	case IP_VERSION(14, 0, 1):
231 	case IP_VERSION(14, 0, 4):
232 		psp_v13_0_set_psp_funcs(psp);
233 		psp->boot_time_tmr = false;
234 		break;
235 	case IP_VERSION(11, 0, 8):
236 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
237 			psp_v11_0_8_set_psp_funcs(psp);
238 		}
239 		psp->autoload_supported = false;
240 		psp->boot_time_tmr = false;
241 		break;
242 	case IP_VERSION(13, 0, 0):
243 	case IP_VERSION(13, 0, 7):
244 	case IP_VERSION(13, 0, 10):
245 		psp_v13_0_set_psp_funcs(psp);
246 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
247 		psp->boot_time_tmr = false;
248 		break;
249 	case IP_VERSION(13, 0, 4):
250 		psp_v13_0_4_set_psp_funcs(psp);
251 		psp->boot_time_tmr = false;
252 		break;
253 	case IP_VERSION(14, 0, 2):
254 	case IP_VERSION(14, 0, 3):
255 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
256 		psp_v14_0_set_psp_funcs(psp);
257 		break;
258 	case IP_VERSION(14, 0, 5):
259 		psp_v14_0_set_psp_funcs(psp);
260 		psp->boot_time_tmr = false;
261 		break;
262 	default:
263 		return -EINVAL;
264 	}
265 
266 	psp->adev = adev;
267 
268 	adev->psp_timeout = 20000;
269 
270 	psp_check_pmfw_centralized_cstate_management(psp);
271 
272 	if (amdgpu_sriov_vf(adev))
273 		return psp_init_sriov_microcode(psp);
274 	else
275 		return psp_init_microcode(psp);
276 }
277 
278 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
279 {
280 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
281 			      &mem_ctx->shared_buf);
282 	mem_ctx->shared_bo = NULL;
283 }
284 
285 static void psp_free_shared_bufs(struct psp_context *psp)
286 {
287 	void *tmr_buf;
288 	void **pptr;
289 
290 	/* free TMR memory buffer */
291 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
292 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
293 	psp->tmr_bo = NULL;
294 
295 	/* free xgmi shared memory */
296 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
297 
298 	/* free ras shared memory */
299 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
300 
301 	/* free hdcp shared memory */
302 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
303 
304 	/* free dtm shared memory */
305 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
306 
307 	/* free rap shared memory */
308 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
309 
310 	/* free securedisplay shared memory */
311 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
312 
313 
314 }
315 
316 static void psp_memory_training_fini(struct psp_context *psp)
317 {
318 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
319 
320 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
321 	kfree(ctx->sys_cache);
322 	ctx->sys_cache = NULL;
323 }
324 
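/*
 * Allocate the system memory cache (sys_cache) of train_data_size bytes that
 * backs the two-stage memory training data; only done when the training
 * region was reserved successfully (PSP_MEM_TRAIN_RESERVE_SUCCESS).
 */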
325 static int psp_memory_training_init(struct psp_context *psp)
326 {
327 	int ret;
328 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
329 
330 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
331 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
332 		return 0;
333 	}
334 
335 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
336 	if (ctx->sys_cache == NULL) {
337 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
338 		ret = -ENOMEM;
339 		goto Err_out;
340 	}
341 
342 	dev_dbg(psp->adev->dev,
343 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
344 		ctx->train_data_size,
345 		ctx->p2c_train_data_offset,
346 		ctx->c2p_train_data_offset);
347 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
348 	return 0;
349 
350 Err_out:
351 	psp_memory_training_fini(psp);
352 	return ret;
353 }
354 
355 /*
356  * Helper function to query a psp runtime database entry
357  *
358  * @adev: amdgpu_device pointer
359  * @entry_type: the type of psp runtime database entry
360  * @db_entry: runtime database entry pointer
361  *
362  * Return false if the runtime database doesn't exist or the entry is invalid,
363  * or true if the requested entry is found and copied to @db_entry
364  */
365 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
366 				     enum psp_runtime_entry_type entry_type,
367 				     void *db_entry)
368 {
369 	uint64_t db_header_pos, db_dir_pos;
370 	struct psp_runtime_data_header db_header = {0};
371 	struct psp_runtime_data_directory db_dir = {0};
372 	bool ret = false;
373 	int i;
374 
375 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
376 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
377 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
378 		return false;
379 
380 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
381 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
382 
383 	/* read runtime db header from vram */
384 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
385 			sizeof(struct psp_runtime_data_header), false);
386 
387 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
388 		/* runtime db doesn't exist, exit */
389 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
390 		return false;
391 	}
392 
393 	/* read runtime database entry from vram */
394 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
395 			sizeof(struct psp_runtime_data_directory), false);
396 
397 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
398 		/* invalid db entry count, exit */
399 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
400 		return false;
401 	}
402 
403 	/* look up the requested entry type */
404 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
405 		if (db_dir.entry_list[i].entry_type == entry_type) {
406 			switch (entry_type) {
407 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
408 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
409 					/* invalid db entry size */
410 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
411 					return false;
412 				}
413 				/* read runtime database entry */
414 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
415 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
416 				ret = true;
417 				break;
418 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
419 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
420 					/* invalid db entry size */
421 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
422 					return false;
423 				}
424 				/* read runtime database entry */
425 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
426 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
427 				ret = true;
428 				break;
429 			default:
430 				ret = false;
431 				break;
432 			}
433 		}
434 	}
435 
436 	return ret;
437 }
438 
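/*
 * psp_sw_init() allocates the psp command buffer, reads the SCPM and boot
 * config entries from the PSP runtime database, optionally runs two-stage
 * memory training, and creates the fw_pri / fence / cmd GPU buffers used to
 * talk to the PSP.
 */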
439 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
440 {
441 	struct amdgpu_device *adev = ip_block->adev;
442 	struct psp_context *psp = &adev->psp;
443 	int ret;
444 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
445 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
446 	struct psp_runtime_scpm_entry scpm_entry;
447 
448 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
449 	if (!psp->cmd) {
450 		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
451 		ret = -ENOMEM;
452 	}
453 
454 	adev->psp.xgmi_context.supports_extended_data =
455 		!adev->gmc.xgmi.connected_to_cpu &&
456 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
457 
458 	memset(&scpm_entry, 0, sizeof(scpm_entry));
459 	if ((psp_get_runtime_db_entry(adev,
460 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
461 				&scpm_entry)) &&
462 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
463 		adev->scpm_enabled = true;
464 		adev->scpm_status = scpm_entry.scpm_status;
465 	} else {
466 		adev->scpm_enabled = false;
467 		adev->scpm_status = SCPM_DISABLE;
468 	}
469 
470 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
471 
472 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
473 	if (psp_get_runtime_db_entry(adev,
474 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
475 				&boot_cfg_entry)) {
476 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
477 		if ((psp->boot_cfg_bitmask) &
478 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
479 			/* If psp runtime database exists, then
480 			 * only enable two stage memory training
481 			 * when TWO_STAGE_DRAM_TRAINING bit is set
482 			 * in runtime database
483 			 */
484 			mem_training_ctx->enable_mem_training = true;
485 		}
486 
487 	} else {
488 		/* If psp runtime database doesn't exist or is
489 		 * invalid, force enable two stage memory training
490 		 */
491 		mem_training_ctx->enable_mem_training = true;
492 	}
493 
494 	if (mem_training_ctx->enable_mem_training) {
495 		ret = psp_memory_training_init(psp);
496 		if (ret) {
497 			dev_err(adev->dev, "Failed to initialize memory training!\n");
498 			return ret;
499 		}
500 
501 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
502 		if (ret) {
503 			dev_err(adev->dev, "Failed to process memory training!\n");
504 			return ret;
505 		}
506 	}
507 
508 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
509 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
510 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
511 				      &psp->fw_pri_bo,
512 				      &psp->fw_pri_mc_addr,
513 				      &psp->fw_pri_buf);
514 	if (ret)
515 		return ret;
516 
517 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
518 				      AMDGPU_GEM_DOMAIN_VRAM |
519 				      AMDGPU_GEM_DOMAIN_GTT,
520 				      &psp->fence_buf_bo,
521 				      &psp->fence_buf_mc_addr,
522 				      &psp->fence_buf);
523 	if (ret)
524 		goto failed1;
525 
526 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
527 				      AMDGPU_GEM_DOMAIN_VRAM |
528 				      AMDGPU_GEM_DOMAIN_GTT,
529 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
530 				      (void **)&psp->cmd_buf_mem);
531 	if (ret)
532 		goto failed2;
533 
534 	return 0;
535 
536 failed2:
537 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
538 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
539 failed1:
540 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
541 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
542 	return ret;
543 }
544 
545 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
546 {
547 	struct amdgpu_device *adev = ip_block->adev;
548 	struct psp_context *psp = &adev->psp;
549 
550 	psp_memory_training_fini(psp);
551 
552 	amdgpu_ucode_release(&psp->sos_fw);
553 	amdgpu_ucode_release(&psp->asd_fw);
554 	amdgpu_ucode_release(&psp->ta_fw);
555 	amdgpu_ucode_release(&psp->cap_fw);
556 	amdgpu_ucode_release(&psp->toc_fw);
557 
558 	kfree(psp->cmd);
559 	psp->cmd = NULL;
560 
561 	psp_free_shared_bufs(psp);
562 
563 	if (psp->km_ring.ring_mem)
564 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
565 				      &psp->km_ring.ring_mem_mc_addr,
566 				      (void **)&psp->km_ring.ring_mem);
567 
568 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
569 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
570 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
571 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
572 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
573 			      (void **)&psp->cmd_buf_mem);
574 
575 	return 0;
576 }
577 
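/*
 * Poll a PSP register until it reaches the expected state or
 * adev->usec_timeout expires. With check_changed the wait is for the value to
 * differ from reg_val; otherwise it is for (value & mask) == reg_val.
 */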
578 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
579 		 uint32_t reg_val, uint32_t mask, bool check_changed)
580 {
581 	uint32_t val;
582 	int i;
583 	struct amdgpu_device *adev = psp->adev;
584 
585 	if (psp->adev->no_hw_access)
586 		return 0;
587 
588 	for (i = 0; i < adev->usec_timeout; i++) {
589 		val = RREG32(reg_index);
590 		if (check_changed) {
591 			if (val != reg_val)
592 				return 0;
593 		} else {
594 			if ((val & mask) == reg_val)
595 				return 0;
596 		}
597 		udelay(1);
598 	}
599 
600 	dev_err(adev->dev,
601 		"psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
602 		reg_index, mask, val, reg_val);
603 
604 	return -ETIME;
605 }
606 
607 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
608 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
609 {
610 	uint32_t val;
611 	int i;
612 	struct amdgpu_device *adev = psp->adev;
613 
614 	if (psp->adev->no_hw_access)
615 		return 0;
616 
617 	for (i = 0; i < msec_timeout; i++) {
618 		val = RREG32(reg_index);
619 		if ((val & mask) == reg_val)
620 			return 0;
621 		msleep(1);
622 	}
623 
624 	return -ETIME;
625 }
626 
627 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
628 {
629 	switch (cmd_id) {
630 	case GFX_CMD_ID_LOAD_TA:
631 		return "LOAD_TA";
632 	case GFX_CMD_ID_UNLOAD_TA:
633 		return "UNLOAD_TA";
634 	case GFX_CMD_ID_INVOKE_CMD:
635 		return "INVOKE_CMD";
636 	case GFX_CMD_ID_LOAD_ASD:
637 		return "LOAD_ASD";
638 	case GFX_CMD_ID_SETUP_TMR:
639 		return "SETUP_TMR";
640 	case GFX_CMD_ID_LOAD_IP_FW:
641 		return "LOAD_IP_FW";
642 	case GFX_CMD_ID_DESTROY_TMR:
643 		return "DESTROY_TMR";
644 	case GFX_CMD_ID_SAVE_RESTORE:
645 		return "SAVE_RESTORE_IP_FW";
646 	case GFX_CMD_ID_SETUP_VMR:
647 		return "SETUP_VMR";
648 	case GFX_CMD_ID_DESTROY_VMR:
649 		return "DESTROY_VMR";
650 	case GFX_CMD_ID_PROG_REG:
651 		return "PROG_REG";
652 	case GFX_CMD_ID_GET_FW_ATTESTATION:
653 		return "GET_FW_ATTESTATION";
654 	case GFX_CMD_ID_LOAD_TOC:
655 		return "ID_LOAD_TOC";
656 	case GFX_CMD_ID_AUTOLOAD_RLC:
657 		return "AUTOLOAD_RLC";
658 	case GFX_CMD_ID_BOOT_CFG:
659 		return "BOOT_CFG";
660 	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
661 		return "CONFIG_SQ_PERFMON";
662 	case GFX_CMD_ID_FB_FW_RESERV_ADDR:
663 		return "FB_FW_RESERV_ADDR";
664 	case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
665 		return "FB_FW_RESERV_EXT_ADDR";
666 	default:
667 		return "UNKNOWN CMD";
668 	}
669 }
670 
671 static bool psp_err_warn(struct psp_context *psp)
672 {
673 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
674 
675 	/* This response indicates reg list is already loaded */
676 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
677 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
678 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
679 	    cmd->resp.status == TEE_ERROR_CANCEL)
680 		return false;
681 
682 	return true;
683 }
684 
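/*
 * Submit one GFX command to the PSP: copy the command into the ring-visible
 * command buffer, submit it with a new fence value, then poll the fence
 * buffer (with HDP invalidation) until the PSP writes the fence back, the
 * timeout expires or a RAS interrupt is raised. The response is copied back
 * into @cmd for the caller.
 */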
685 static int
686 psp_cmd_submit_buf(struct psp_context *psp,
687 		   struct amdgpu_firmware_info *ucode,
688 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
689 {
690 	int ret;
691 	int index;
692 	int timeout = psp->adev->psp_timeout;
693 	bool ras_intr = false;
694 	bool skip_unsupport = false;
695 
696 	if (psp->adev->no_hw_access)
697 		return 0;
698 
699 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
700 
701 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
702 
703 	index = atomic_inc_return(&psp->fence_value);
704 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
705 	if (ret) {
706 		atomic_dec(&psp->fence_value);
707 		goto exit;
708 	}
709 
710 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
711 	while (*((unsigned int *)psp->fence_buf) != index) {
712 		if (--timeout == 0)
713 			break;
714 		/*
715 		 * Don't wait for the timeout when err_event_athub occurs, because
716 		 * the gpu reset thread has been triggered and the lock resource
717 		 * should be released for the psp resume sequence.
718 		 */
719 		ras_intr = amdgpu_ras_intr_triggered();
720 		if (ras_intr)
721 			break;
722 		usleep_range(10, 100);
723 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
724 	}
725 
726 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
727 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
728 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
729 
730 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
731 
732 	/* In some cases, the psp response status is not 0 even though there is
733 	 * no problem with the submitted command. Some versions of the PSP FW
734 	 * do not write 0 to that field.
735 	 * So during psp initialization only print a warning instead of an
736 	 * error, to avoid breaking hw_init, and do not return -EINVAL in
737 	 * that case.
738 	 */
739 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
740 		if (ucode)
741 			dev_warn(psp->adev->dev,
742 				 "failed to load ucode %s(0x%X) ",
743 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
744 		if (psp_err_warn(psp))
745 			dev_warn(
746 				psp->adev->dev,
747 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
748 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
749 				psp->cmd_buf_mem->cmd_id,
750 				psp->cmd_buf_mem->resp.status);
751 		/* If any firmware (including CAP) load fails under SRIOV, it should
752 		 * return failure to stop the VF from initializing.
753 		 * Also return failure in case of timeout
754 		 */
755 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
756 			ret = -EINVAL;
757 			goto exit;
758 		}
759 	}
760 
761 	if (ucode) {
762 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
763 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
764 	}
765 
766 exit:
767 	return ret;
768 }
769 
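/*
 * All GFX commands share the single psp->cmd buffer, serialized by
 * psp->mutex: acquire_psp_cmd_buf() locks and zeroes it,
 * release_psp_cmd_buf() drops the lock.
 */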
770 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
771 {
772 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
773 
774 	mutex_lock(&psp->mutex);
775 
776 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
777 
778 	return cmd;
779 }
780 
781 static void release_psp_cmd_buf(struct psp_context *psp)
782 {
783 	mutex_unlock(&psp->mutex);
784 }
785 
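/*
 * Build a SETUP_TMR command (SETUP_VMR under SRIOV) carrying both the GPU MC
 * address and the VRAM physical address of the TMR buffer.
 */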
786 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
787 				 struct psp_gfx_cmd_resp *cmd,
788 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
789 {
790 	struct amdgpu_device *adev = psp->adev;
791 	uint32_t size = 0;
792 	uint64_t tmr_pa = 0;
793 
794 	if (tmr_bo) {
795 		size = amdgpu_bo_size(tmr_bo);
796 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
797 	}
798 
799 	if (amdgpu_sriov_vf(psp->adev))
800 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
801 	else
802 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
803 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
804 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
805 	cmd->cmd.cmd_setup_tmr.buf_size = size;
806 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
807 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
808 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
809 }
810 
811 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
812 				      uint64_t pri_buf_mc, uint32_t size)
813 {
814 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
815 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
816 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
817 	cmd->cmd.cmd_load_toc.toc_size = size;
818 }
819 
820 /* Issue the LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
821 static int psp_load_toc(struct psp_context *psp,
822 			uint32_t *tmr_size)
823 {
824 	int ret;
825 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
826 
827 	/* Copy toc to psp firmware private buffer */
828 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
829 
830 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
831 
832 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
833 				 psp->fence_buf_mc_addr);
834 	if (!ret)
835 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
836 
837 	release_psp_cmd_buf(psp);
838 
839 	return ret;
840 }
841 
842 /* Set up Trusted Memory Region */
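/*
 * The TMR size defaults to PSP_TMR_SIZE(); for ASICs with RLC autoload the
 * PSP recalculates it while parsing the TOC in psp_load_toc(). The buffer
 * itself is only allocated here when the TMR is not already set up at boot
 * time (psp->boot_time_tmr).
 */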
843 static int psp_tmr_init(struct psp_context *psp)
844 {
845 	int ret = 0;
846 	int tmr_size;
847 	void *tmr_buf;
848 	void **pptr;
849 
850 	/*
851 	 * According to the HW engineers, the TMR address should be "naturally
852 	 * aligned", i.e. the start address should be an integer multiple of the TMR size.
853 	 *
854 	 * Note: this memory needs to stay reserved until the driver is
855 	 * unloaded.
856 	 */
857 	tmr_size = PSP_TMR_SIZE(psp->adev);
858 
859 	/* For ASICs that support RLC autoload, psp will parse the toc
860 	 * and calculate the total size of TMR needed
861 	 */
862 	if (!amdgpu_sriov_vf(psp->adev) &&
863 	    psp->toc.start_addr &&
864 	    psp->toc.size_bytes &&
865 	    psp->fw_pri_buf) {
866 		ret = psp_load_toc(psp, &tmr_size);
867 		if (ret) {
868 			dev_err(psp->adev->dev, "Failed to load toc\n");
869 			return ret;
870 		}
871 	}
872 
873 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
874 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
875 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
876 					      PSP_TMR_ALIGNMENT,
877 					      AMDGPU_HAS_VRAM(psp->adev) ?
878 					      AMDGPU_GEM_DOMAIN_VRAM :
879 					      AMDGPU_GEM_DOMAIN_GTT,
880 					      &psp->tmr_bo, &psp->tmr_mc_addr,
881 					      pptr);
882 	}
883 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
884 		psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
885 
886 	return ret;
887 }
888 
889 static bool psp_skip_tmr(struct psp_context *psp)
890 {
891 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
892 	case IP_VERSION(11, 0, 9):
893 	case IP_VERSION(11, 0, 7):
894 	case IP_VERSION(13, 0, 2):
895 	case IP_VERSION(13, 0, 6):
896 	case IP_VERSION(13, 0, 10):
897 	case IP_VERSION(13, 0, 12):
898 	case IP_VERSION(13, 0, 14):
899 		return true;
900 	default:
901 		return false;
902 	}
903 }
904 
905 static int psp_tmr_load(struct psp_context *psp)
906 {
907 	int ret;
908 	struct psp_gfx_cmd_resp *cmd;
909 
910 	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
911 	 * Already set up by host driver.
912 	 */
913 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
914 		return 0;
915 
916 	cmd = acquire_psp_cmd_buf(psp);
917 
918 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
919 	if (psp->tmr_bo)
920 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
921 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
922 
923 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
924 				 psp->fence_buf_mc_addr);
925 
926 	release_psp_cmd_buf(psp);
927 
928 	return ret;
929 }
930 
931 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
932 					struct psp_gfx_cmd_resp *cmd)
933 {
934 	if (amdgpu_sriov_vf(psp->adev))
935 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
936 	else
937 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
938 }
939 
940 static int psp_tmr_unload(struct psp_context *psp)
941 {
942 	int ret;
943 	struct psp_gfx_cmd_resp *cmd;
944 
945 	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
946 	 * as TMR is not loaded at all
947 	 */
948 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
949 		return 0;
950 
951 	cmd = acquire_psp_cmd_buf(psp);
952 
953 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
954 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
955 
956 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
957 				 psp->fence_buf_mc_addr);
958 
959 	release_psp_cmd_buf(psp);
960 
961 	return ret;
962 }
963 
964 static int psp_tmr_terminate(struct psp_context *psp)
965 {
966 	return psp_tmr_unload(psp);
967 }
968 
969 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
970 					uint64_t *output_ptr)
971 {
972 	int ret;
973 	struct psp_gfx_cmd_resp *cmd;
974 
975 	if (!output_ptr)
976 		return -EINVAL;
977 
978 	if (amdgpu_sriov_vf(psp->adev))
979 		return 0;
980 
981 	cmd = acquire_psp_cmd_buf(psp);
982 
983 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
984 
985 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
986 				 psp->fence_buf_mc_addr);
987 
988 	if (!ret) {
989 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
990 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
991 	}
992 
993 	release_psp_cmd_buf(psp);
994 
995 	return ret;
996 }
997 
998 static int psp_get_fw_reservation_info(struct psp_context *psp,
999 						   uint32_t cmd_id,
1000 						   uint64_t *addr,
1001 						   uint32_t *size)
1002 {
1003 	int ret;
1004 	uint32_t status;
1005 	struct psp_gfx_cmd_resp *cmd;
1006 
1007 	cmd = acquire_psp_cmd_buf(psp);
1008 
1009 	cmd->cmd_id = cmd_id;
1010 
1011 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1012 				 psp->fence_buf_mc_addr);
1013 	if (ret) {
1014 		release_psp_cmd_buf(psp);
1015 		return ret;
1016 	}
1017 
1018 	status = cmd->resp.status;
1019 	if (status == PSP_ERR_UNKNOWN_COMMAND) {
1020 		release_psp_cmd_buf(psp);
1021 		*addr = 0;
1022 		*size = 0;
1023 		return 0;
1024 	}
1025 
1026 	*addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
1027 		cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
1028 	*size = cmd->resp.uresp.fw_reserve_info.reserve_size;
1029 
1030 	release_psp_cmd_buf(psp);
1031 
1032 	return 0;
1033 }
1034 
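/*
 * Query the PSP for its firmware-reserved framebuffer regions (base and
 * extended), sanity check that the base region sits at the top of VRAM, and
 * re-reserve both regions via amdgpu_bo_create_kernel_at(), rounded up to
 * 1 MiB. Only applies to MP0 v14.0.2/v14.0.3 on bare metal.
 */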
1035 int psp_update_fw_reservation(struct psp_context *psp)
1036 {
1037 	int ret;
1038 	uint64_t reserv_addr, reserv_addr_ext;
1039 	uint32_t reserv_size, reserv_size_ext;
1040 	struct amdgpu_device *adev = psp->adev;
1041 
1042 	if (amdgpu_sriov_vf(psp->adev))
1043 		return 0;
1044 
1045 	if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) &&
1046 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3)))
1047 		return 0;
1048 
1049 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
1050 	if (ret)
1051 		return ret;
1052 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
1053 	if (ret)
1054 		return ret;
1055 
1056 	if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
1057 		dev_warn(adev->dev, "reserve fw region is not valid!\n");
1058 		return 0;
1059 	}
1060 
1061 	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1062 
1063 	reserv_size = roundup(reserv_size, SZ_1M);
1064 
1065 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
1066 	if (ret) {
1067 		dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
1068 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1069 		return ret;
1070 	}
1071 
1072 	reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
1073 
1074 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
1075 					 &adev->mman.fw_reserved_memory_extend, NULL);
1076 	if (ret) {
1077 		dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
1078 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
1079 		return ret;
1080 	}
1081 
1082 	return 0;
1083 }
1084 
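/*
 * Read the boot config word through GFX_CMD_ID_BOOT_CFG/BOOTCFG_CMD_GET and
 * report whether the BOOT_CONFIG_GECC bit is set.
 */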
1085 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
1086 {
1087 	struct psp_context *psp = &adev->psp;
1088 	struct psp_gfx_cmd_resp *cmd;
1089 	int ret;
1090 
1091 	if (amdgpu_sriov_vf(adev))
1092 		return 0;
1093 
1094 	cmd = acquire_psp_cmd_buf(psp);
1095 
1096 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1097 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
1098 
1099 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1100 	if (!ret) {
1101 		*boot_cfg =
1102 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1103 	}
1104 
1105 	release_psp_cmd_buf(psp);
1106 
1107 	return ret;
1108 }
1109 
1110 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1111 {
1112 	int ret;
1113 	struct psp_context *psp = &adev->psp;
1114 	struct psp_gfx_cmd_resp *cmd;
1115 
1116 	if (amdgpu_sriov_vf(adev))
1117 		return 0;
1118 
1119 	cmd = acquire_psp_cmd_buf(psp);
1120 
1121 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1122 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1123 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1124 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1125 
1126 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1127 
1128 	release_psp_cmd_buf(psp);
1129 
1130 	return ret;
1131 }
1132 
1133 static int psp_rl_load(struct amdgpu_device *adev)
1134 {
1135 	int ret;
1136 	struct psp_context *psp = &adev->psp;
1137 	struct psp_gfx_cmd_resp *cmd;
1138 
1139 	if (!is_psp_fw_valid(psp->rl))
1140 		return 0;
1141 
1142 	cmd = acquire_psp_cmd_buf(psp);
1143 
1144 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1145 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1146 
1147 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1148 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1149 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1150 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1151 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1152 
1153 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1154 
1155 	release_psp_cmd_buf(psp);
1156 
1157 	return ret;
1158 }
1159 
1160 int psp_memory_partition(struct psp_context *psp, int mode)
1161 {
1162 	struct psp_gfx_cmd_resp *cmd;
1163 	int ret;
1164 
1165 	if (amdgpu_sriov_vf(psp->adev))
1166 		return 0;
1167 
1168 	cmd = acquire_psp_cmd_buf(psp);
1169 
1170 	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1171 	cmd->cmd.cmd_memory_part.mode = mode;
1172 
1173 	dev_info(psp->adev->dev,
1174 		 "Requesting %d memory partition change through PSP", mode);
1175 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1176 	if (ret)
1177 		dev_err(psp->adev->dev,
1178 			"PSP request failed to change to NPS%d mode\n", mode);
1179 
1180 	release_psp_cmd_buf(psp);
1181 
1182 	return ret;
1183 }
1184 
1185 int psp_spatial_partition(struct psp_context *psp, int mode)
1186 {
1187 	struct psp_gfx_cmd_resp *cmd;
1188 	int ret;
1189 
1190 	if (amdgpu_sriov_vf(psp->adev))
1191 		return 0;
1192 
1193 	cmd = acquire_psp_cmd_buf(psp);
1194 
1195 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1196 	cmd->cmd.cmd_spatial_part.mode = mode;
1197 
1198 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1199 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1200 
1201 	release_psp_cmd_buf(psp);
1202 
1203 	return ret;
1204 }
1205 
1206 static int psp_asd_initialize(struct psp_context *psp)
1207 {
1208 	int ret;
1209 
1210 	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
1211 	 * Add a workaround to bypass it for sriov for now.
1212 	 * TODO: add a version check to make this common
1213 	 */
1214 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1215 		return 0;
1216 
1217 	/* bypass asd if display hardware is not available */
1218 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1219 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1220 		return 0;
1221 
1222 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1223 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1224 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1225 
1226 	ret = psp_ta_load(psp, &psp->asd_context);
1227 	if (!ret)
1228 		psp->asd_context.initialized = true;
1229 
1230 	return ret;
1231 }
1232 
1233 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1234 				       uint32_t session_id)
1235 {
1236 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1237 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1238 }
1239 
1240 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1241 {
1242 	int ret;
1243 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1244 
1245 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1246 
1247 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1248 
1249 	context->resp_status = cmd->resp.status;
1250 
1251 	release_psp_cmd_buf(psp);
1252 
1253 	return ret;
1254 }
1255 
1256 static int psp_asd_terminate(struct psp_context *psp)
1257 {
1258 	int ret;
1259 
1260 	if (amdgpu_sriov_vf(psp->adev))
1261 		return 0;
1262 
1263 	if (!psp->asd_context.initialized)
1264 		return 0;
1265 
1266 	ret = psp_ta_unload(psp, &psp->asd_context);
1267 	if (!ret)
1268 		psp->asd_context.initialized = false;
1269 
1270 	return ret;
1271 }
1272 
1273 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1274 		uint32_t id, uint32_t value)
1275 {
1276 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1277 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1278 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1279 }
1280 
1281 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1282 		uint32_t value)
1283 {
1284 	struct psp_gfx_cmd_resp *cmd;
1285 	int ret = 0;
1286 
1287 	if (reg >= PSP_REG_LAST)
1288 		return -EINVAL;
1289 
1290 	cmd = acquire_psp_cmd_buf(psp);
1291 
1292 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1293 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1294 	if (ret)
1295 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1296 
1297 	release_psp_cmd_buf(psp);
1298 
1299 	return ret;
1300 }
1301 
1302 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1303 				     uint64_t ta_bin_mc,
1304 				     struct ta_context *context)
1305 {
1306 	cmd->cmd_id				= context->ta_load_type;
1307 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1308 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1309 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1310 
1311 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1312 		lower_32_bits(context->mem_context.shared_mc_addr);
1313 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1314 		upper_32_bits(context->mem_context.shared_mc_addr);
1315 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1316 }
1317 
1318 int psp_ta_init_shared_buf(struct psp_context *psp,
1319 				  struct ta_mem_context *mem_ctx)
1320 {
1321 	/*
1322 	 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
1323 	 * physical memory) to serve as the ta <-> host shared buffer
1324 	 */
1325 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1326 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1327 				      AMDGPU_GEM_DOMAIN_GTT,
1328 				      &mem_ctx->shared_bo,
1329 				      &mem_ctx->shared_mc_addr,
1330 				      &mem_ctx->shared_buf);
1331 }
1332 
1333 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1334 				       uint32_t ta_cmd_id,
1335 				       uint32_t session_id)
1336 {
1337 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1338 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1339 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1340 }
1341 
1342 int psp_ta_invoke(struct psp_context *psp,
1343 		  uint32_t ta_cmd_id,
1344 		  struct ta_context *context)
1345 {
1346 	int ret;
1347 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1348 
1349 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1350 
1351 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1352 				 psp->fence_buf_mc_addr);
1353 
1354 	context->resp_status = cmd->resp.status;
1355 
1356 	release_psp_cmd_buf(psp);
1357 
1358 	return ret;
1359 }
1360 
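/*
 * Load a TA: copy its binary into the fw_pri buffer, issue the load command
 * with the TA's shared-memory address, and record the session id returned by
 * the PSP on success.
 */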
1361 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1362 {
1363 	int ret;
1364 	struct psp_gfx_cmd_resp *cmd;
1365 
1366 	cmd = acquire_psp_cmd_buf(psp);
1367 
1368 	psp_copy_fw(psp, context->bin_desc.start_addr,
1369 		    context->bin_desc.size_bytes);
1370 
1371 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
1372 		context->mem_context.shared_bo)
1373 		context->mem_context.shared_mc_addr =
1374 			amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
1375 
1376 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1377 
1378 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1379 				 psp->fence_buf_mc_addr);
1380 
1381 	context->resp_status = cmd->resp.status;
1382 
1383 	if (!ret)
1384 		context->session_id = cmd->resp.session_id;
1385 
1386 	release_psp_cmd_buf(psp);
1387 
1388 	return ret;
1389 }
1390 
1391 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1392 {
1393 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1394 }
1395 
1396 int psp_xgmi_terminate(struct psp_context *psp)
1397 {
1398 	int ret;
1399 	struct amdgpu_device *adev = psp->adev;
1400 
1401 	/* XGMI TA unload is currently not supported on Arcturus/Aldebaran A+A */
1402 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1403 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1404 	     adev->gmc.xgmi.connected_to_cpu))
1405 		return 0;
1406 
1407 	if (!psp->xgmi_context.context.initialized)
1408 		return 0;
1409 
1410 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1411 
1412 	psp->xgmi_context.context.initialized = false;
1413 
1414 	return ret;
1415 }
1416 
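/*
 * Bring up the XGMI TA: allocate its shared buffer on first use, load the TA
 * when @load_ta is set, then invoke TA_COMMAND_XGMI__INITIALIZE and cache the
 * TA capability flags.
 */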
1417 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1418 {
1419 	struct ta_xgmi_shared_memory *xgmi_cmd;
1420 	int ret;
1421 
1422 	if (!psp->ta_fw ||
1423 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1424 	    !psp->xgmi_context.context.bin_desc.start_addr)
1425 		return -ENOENT;
1426 
1427 	if (!load_ta)
1428 		goto invoke;
1429 
1430 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1431 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1432 
1433 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1434 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1435 		if (ret)
1436 			return ret;
1437 	}
1438 
1439 	/* Load XGMI TA */
1440 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1441 	if (!ret)
1442 		psp->xgmi_context.context.initialized = true;
1443 	else
1444 		return ret;
1445 
1446 invoke:
1447 	/* Initialize XGMI session */
1448 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1449 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1450 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1451 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1452 
1453 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1454 	/* note down the capability flag for the XGMI TA */
1455 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1456 
1457 	return ret;
1458 }
1459 
1460 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1461 {
1462 	struct ta_xgmi_shared_memory *xgmi_cmd;
1463 	int ret;
1464 
1465 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1466 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1467 
1468 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1469 
1470 	/* Invoke xgmi ta to get hive id */
1471 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1472 	if (ret)
1473 		return ret;
1474 
1475 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1476 
1477 	return 0;
1478 }
1479 
1480 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1481 {
1482 	struct ta_xgmi_shared_memory *xgmi_cmd;
1483 	int ret;
1484 
1485 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1486 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1487 
1488 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1489 
1490 	/* Invoke xgmi ta to get the node id */
1491 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1492 	if (ret)
1493 		return ret;
1494 
1495 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1496 
1497 	return 0;
1498 }
1499 
1500 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1501 {
1502 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1503 			IP_VERSION(13, 0, 2) &&
1504 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1505 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1506 		       IP_VERSION(13, 0, 6);
1507 }
1508 
1509 /*
1510  * Chips that support extended topology information require the driver to
1511  * reflect topology information in the opposite direction.  This is
1512  * because the TA has already exceeded its link record limit and if the
1513  * TA holds bi-directional information, the driver would have to do
1514  * multiple fetches instead of just two.
1515  */
1516 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1517 					struct psp_xgmi_node_info node_info)
1518 {
1519 	struct amdgpu_device *mirror_adev;
1520 	struct amdgpu_hive_info *hive;
1521 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1522 	uint64_t dst_node_id = node_info.node_id;
1523 	uint8_t dst_num_hops = node_info.num_hops;
1524 	uint8_t dst_num_links = node_info.num_links;
1525 
1526 	hive = amdgpu_get_xgmi_hive(psp->adev);
1527 	if (WARN_ON(!hive))
1528 		return;
1529 
1530 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1531 		struct psp_xgmi_topology_info *mirror_top_info;
1532 		int j;
1533 
1534 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1535 			continue;
1536 
1537 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1538 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1539 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1540 				continue;
1541 
1542 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1543 			/*
1544 			 * prevent re-reflection of a 0 num_links value since the
1545 			 * reflection criterion is based on num_hops (direct or
1546 			 * indirect).
1547 			 */
1548 			if (dst_num_links)
1549 				mirror_top_info->nodes[j].num_links = dst_num_links;
1550 
1551 			break;
1552 		}
1553 
1554 		break;
1555 	}
1556 
1557 	amdgpu_put_xgmi_hive(hive);
1558 }
1559 
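/*
 * Topology discovery is a two step TA exchange: GET_TOPOLOGY_INFO returns the
 * node ids, hop counts and sharing info, then GET_PEER_LINKS (or
 * GET_EXTEND_PEER_LINKS when the TA advertises port-number support) fills in
 * the link counts. Where required, num_hops/num_links are also reflected back
 * into the peer devices' topology via psp_xgmi_reflect_topology_info().
 */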
1560 int psp_xgmi_get_topology_info(struct psp_context *psp,
1561 			       int number_devices,
1562 			       struct psp_xgmi_topology_info *topology,
1563 			       bool get_extended_data)
1564 {
1565 	struct ta_xgmi_shared_memory *xgmi_cmd;
1566 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1567 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1568 	int i;
1569 	int ret;
1570 
1571 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1572 		return -EINVAL;
1573 
1574 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1575 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1576 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1577 
1578 	/* Fill in the shared memory with topology information as input */
1579 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1580 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1581 	topology_info_input->num_nodes = number_devices;
1582 
1583 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1584 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1585 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1586 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1587 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1588 	}
1589 
1590 	/* Invoke xgmi ta to get the topology information */
1591 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1592 	if (ret)
1593 		return ret;
1594 
1595 	/* Read the output topology information from the shared memory */
1596 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1597 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1598 	for (i = 0; i < topology->num_nodes; i++) {
1599 		/* extended data will either be 0 or equal to non-extended data */
1600 		if (topology_info_output->nodes[i].num_hops)
1601 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1602 
1603 		/* non-extended data gets everything here so no need to update */
1604 		if (!get_extended_data) {
1605 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1606 			topology->nodes[i].is_sharing_enabled =
1607 					topology_info_output->nodes[i].is_sharing_enabled;
1608 			topology->nodes[i].sdma_engine =
1609 					topology_info_output->nodes[i].sdma_engine;
1610 		}
1611 
1612 	}
1613 
1614 	/* Invoke xgmi ta again to get the link information */
1615 	if (psp_xgmi_peer_link_info_supported(psp)) {
1616 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1617 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1618 		bool requires_reflection =
1619 			(psp->xgmi_context.supports_extended_data &&
1620 			 get_extended_data) ||
1621 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1622 				IP_VERSION(13, 0, 6) ||
1623 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1624 				IP_VERSION(13, 0, 14);
1625 		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1626 				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1627 
1628 		/* populate the shared output buffer, rather than the cmd input buffer,
1629 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1630 		 * This is required by the xgmi ta implementation of GET_PEER_LINKS.
1631 		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
1632 		 */
1633 		if (ta_port_num_support) {
1634 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1635 
1636 			for (i = 0; i < topology->num_nodes; i++)
1637 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1638 
1639 			link_extend_info_output->num_nodes = topology->num_nodes;
1640 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1641 		} else {
1642 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1643 
1644 			for (i = 0; i < topology->num_nodes; i++)
1645 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1646 
1647 			link_info_output->num_nodes = topology->num_nodes;
1648 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1649 		}
1650 
1651 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1652 		if (ret)
1653 			return ret;
1654 
1655 		for (i = 0; i < topology->num_nodes; i++) {
1656 			uint8_t node_num_links = ta_port_num_support ?
1657 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1658 			/* accumulate num_links on extended data */
1659 			if (get_extended_data) {
1660 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1661 			} else {
1662 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1663 								topology->nodes[i].num_links : node_num_links;
1664 			}
1665 			/* populate the connected port num info if supported and available */
1666 			if (ta_port_num_support && topology->nodes[i].num_links) {
1667 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1668 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1669 			}
1670 
1671 			/* reflect the topology information for bi-directionality */
1672 			if (requires_reflection && topology->nodes[i].num_hops)
1673 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1674 		}
1675 	}
1676 
1677 	return 0;
1678 }
1679 
1680 int psp_xgmi_set_topology_info(struct psp_context *psp,
1681 			       int number_devices,
1682 			       struct psp_xgmi_topology_info *topology)
1683 {
1684 	struct ta_xgmi_shared_memory *xgmi_cmd;
1685 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1686 	int i;
1687 
1688 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1689 		return -EINVAL;
1690 
1691 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1692 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1693 
1694 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1695 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1696 	topology_info_input->num_nodes = number_devices;
1697 
1698 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1699 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1700 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1701 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1702 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1703 	}
1704 
1705 	/* Invoke xgmi ta to set topology information */
1706 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1707 }
1708 
1709 // ras begin
1710 static void psp_ras_ta_check_status(struct psp_context *psp)
1711 {
1712 	struct ta_ras_shared_memory *ras_cmd =
1713 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1714 
1715 	switch (ras_cmd->ras_status) {
1716 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1717 		dev_warn(psp->adev->dev,
1718 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1719 		break;
1720 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1721 		dev_warn(psp->adev->dev,
1722 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1723 		break;
1724 	case TA_RAS_STATUS__SUCCESS:
1725 		break;
1726 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1727 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1728 			dev_warn(psp->adev->dev,
1729 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1730 		break;
1731 	default:
1732 		dev_warn(psp->adev->dev,
1733 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1734 		break;
1735 	}
1736 }
1737 
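/*
 * Serialize RAS TA commands with ras_context.mutex: copy the command-specific
 * input into the shared buffer, invoke the TA, and copy the status/address
 * output back for TRIGGER_ERROR and QUERY_ADDRESS.
 */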
1738 static int psp_ras_send_cmd(struct psp_context *psp,
1739 		enum ras_command cmd_id, void *in, void *out)
1740 {
1741 	struct ta_ras_shared_memory *ras_cmd;
1742 	uint32_t cmd = cmd_id;
1743 	int ret = 0;
1744 
1745 	if (!in)
1746 		return -EINVAL;
1747 
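	/* the RAS TA shared buffer is reused for every command, so serialize
	 * access with the RAS context mutex
	 */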
1748 	mutex_lock(&psp->ras_context.mutex);
1749 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1750 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1751 
1752 	switch (cmd) {
1753 	case TA_RAS_COMMAND__ENABLE_FEATURES:
1754 	case TA_RAS_COMMAND__DISABLE_FEATURES:
1755 		memcpy(&ras_cmd->ras_in_message,
1756 			in, sizeof(ras_cmd->ras_in_message));
1757 		break;
1758 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1759 		memcpy(&ras_cmd->ras_in_message.trigger_error,
1760 			in, sizeof(ras_cmd->ras_in_message.trigger_error));
1761 		break;
1762 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1763 		memcpy(&ras_cmd->ras_in_message.address,
1764 			in, sizeof(ras_cmd->ras_in_message.address));
1765 		break;
1766 	default:
1767 		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1768 		ret = -EINVAL;
1769 		goto err_out;
1770 	}
1771 
1772 	ras_cmd->cmd_id = cmd;
1773 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1774 
1775 	switch (cmd) {
1776 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1777 		if (!ret && out)
1778 			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1779 		break;
1780 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1781 		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1782 			ret = -EINVAL;
1783 		else if (out)
1784 			memcpy(out,
1785 				&ras_cmd->ras_out_message.address,
1786 				sizeof(ras_cmd->ras_out_message.address));
1787 		break;
1788 	default:
1789 		break;
1790 	}
1791 
1792 err_out:
1793 	mutex_unlock(&psp->ras_context.mutex);
1794 
1795 	return ret;
1796 }
1797 
1798 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1799 {
1800 	struct ta_ras_shared_memory *ras_cmd;
1801 	int ret;
1802 
1803 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1804 
1805 	/*
1806 	 * TODO: bypass the loading in sriov for now
1807 	 */
1808 	if (amdgpu_sriov_vf(psp->adev))
1809 		return 0;
1810 
1811 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1812 
1813 	if (amdgpu_ras_intr_triggered())
1814 		return ret;
1815 
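	/* reject results from a RAS TA whose host interface version is newer
	 * than this driver was built against
	 */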
1816 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1817 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1818 		return -EINVAL;
1819 	}
1820 
1821 	if (!ret) {
1822 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1823 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1824 
1825 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1826 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1827 			dev_warn(psp->adev->dev,
1828 				 "RAS internal register access blocked\n");
1829 
1830 		psp_ras_ta_check_status(psp);
1831 	}
1832 
1833 	return ret;
1834 }
1835 
1836 int psp_ras_enable_features(struct psp_context *psp,
1837 		union ta_ras_cmd_input *info, bool enable)
1838 {
1839 	enum ras_command cmd_id;
1840 	int ret;
1841 
1842 	if (!psp->ras_context.context.initialized || !info)
1843 		return -EINVAL;
1844 
1845 	cmd_id = enable ?
1846 		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1847 	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1848 	if (ret)
1849 		return -EINVAL;
1850 
1851 	return 0;
1852 }
1853 
1854 int psp_ras_terminate(struct psp_context *psp)
1855 {
1856 	int ret;
1857 
1858 	/*
1859 	 * TODO: bypass the terminate in sriov for now
1860 	 */
1861 	if (amdgpu_sriov_vf(psp->adev))
1862 		return 0;
1863 
1864 	if (!psp->ras_context.context.initialized)
1865 		return 0;
1866 
1867 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1868 
1869 	psp->ras_context.context.initialized = false;
1870 
1871 	mutex_destroy(&psp->ras_context.mutex);
1872 
1873 	return ret;
1874 }
1875 
1876 int psp_ras_initialize(struct psp_context *psp)
1877 {
1878 	int ret;
1879 	uint32_t boot_cfg = 0xFF;
1880 	struct amdgpu_device *adev = psp->adev;
1881 	struct ta_ras_shared_memory *ras_cmd;
1882 
1883 	/*
1884 	 * TODO: bypass the initialize in sriov for now
1885 	 */
1886 	if (amdgpu_sriov_vf(adev))
1887 		return 0;
1888 
1889 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1890 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1891 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1892 		return 0;
1893 	}
1894 
1895 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1896 		/* query GECC enablement status from boot config
1897 		 * boot_cfg: 1 means GECC is enabled, 0 means GECC is disabled
1898 		 */
1899 		ret = psp_boot_config_get(adev, &boot_cfg);
1900 		if (ret)
1901 			dev_warn(adev->dev, "PSP get boot config failed\n");
1902 
1903 		if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
1904 		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1905 			dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
1906 			dev_warn(adev->dev,
1907 				"To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
1908 		} else {
1909 			if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
1910 				amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1911 				if (boot_cfg == 1) {
1912 					dev_info(adev->dev, "GECC is enabled\n");
1913 				} else {
1914 					/* enable GECC in the next boot cycle if it is disabled
1915 					 * in the boot config, or force-enable GECC if the boot
1916 					 * configuration could not be read
1917 					 */
1918 					ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1919 					if (ret)
1920 						dev_warn(adev->dev, "PSP set boot config failed\n");
1921 					else
1922 						dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1923 				}
1924 			} else {
1925 				if (!boot_cfg) {
1926 					if (!adev->ras_default_ecc_enabled &&
1927 					    amdgpu_ras_enable != 1 &&
1928 					    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1929 						dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
1930 					else
1931 						dev_info(adev->dev, "GECC is disabled\n");
1932 				} else {
1933 					/* disable GECC in the next boot cycle if RAS is
1934 					 * disabled by the module parameters amdgpu_ras_enable
1935 					 * and/or amdgpu_ras_mask, or if the boot_config_get
1936 					 * call failed
1937 					 */
1938 					ret = psp_boot_config_set(adev, 0);
1939 					if (ret)
1940 						dev_warn(adev->dev, "PSP set boot config failed\n");
1941 					else
1942 						dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1943 				}
1944 			}
1945 		}
1946 	}
1947 
1948 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1949 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1950 
1951 	if (!psp->ras_context.context.mem_context.shared_buf) {
1952 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1953 		if (ret)
1954 			return ret;
1955 	}
1956 
1957 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1958 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1959 
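	/* describe the platform to the RAS TA via init flags: poison handling,
	 * dGPU vs APU mode, active XCC/UMC masks, disabled channel count and
	 * memory partition (NPS) mode
	 */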
1960 	if (amdgpu_ras_is_poison_mode_supported(adev))
1961 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1962 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1963 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1964 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1965 		adev->gfx.xcc_mask;
1966 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1967 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1968 		ras_cmd->ras_in_message.init_flags.nps_mode =
1969 			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1970 	ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
1971 
1972 	ret = psp_ta_load(psp, &psp->ras_context.context);
1973 
1974 	if (!ret && !ras_cmd->ras_status) {
1975 		psp->ras_context.context.initialized = true;
1976 		mutex_init(&psp->ras_context.mutex);
1977 	} else {
1978 		if (ras_cmd->ras_status)
1979 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1980 
1981 		/* fail to load RAS TA */
1982 		psp->ras_context.context.initialized = false;
1983 	}
1984 
1985 	return ret;
1986 }
1987 
1988 int psp_ras_trigger_error(struct psp_context *psp,
1989 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1990 {
1991 	struct amdgpu_device *adev = psp->adev;
1992 	int ret;
1993 	uint32_t dev_mask;
1994 	uint32_t ras_status = 0;
1995 
1996 	if (!psp->ras_context.context.initialized || !info)
1997 		return -EINVAL;
1998 
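	/* map the caller's logical instance mask to the hardware instance mask
	 * of the target IP block
	 */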
1999 	switch (info->block_id) {
2000 	case TA_RAS_BLOCK__GFX:
2001 		dev_mask = GET_MASK(GC, instance_mask);
2002 		break;
2003 	case TA_RAS_BLOCK__SDMA:
2004 		dev_mask = GET_MASK(SDMA0, instance_mask);
2005 		break;
2006 	case TA_RAS_BLOCK__VCN:
2007 	case TA_RAS_BLOCK__JPEG:
2008 		dev_mask = GET_MASK(VCN, instance_mask);
2009 		break;
2010 	default:
2011 		dev_mask = instance_mask;
2012 		break;
2013 	}
2014 
2015 	/* reuse sub_block_index for backward compatibility */
2016 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
2017 	dev_mask &= AMDGPU_RAS_INST_MASK;
2018 	info->sub_block_index |= dev_mask;
2019 
2020 	ret = psp_ras_send_cmd(psp,
2021 			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
2022 	if (ret)
2023 		return -EINVAL;
2024 
2025 	/* If err_event_athub occurs, the error injection was successful;
2026 	 * however, the return status from the TA is no longer reliable
2027 	 */
2028 	if (amdgpu_ras_intr_triggered())
2029 		return 0;
2030 
2031 	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
2032 		return -EACCES;
2033 	else if (ras_status)
2034 		return -EINVAL;
2035 
2036 	return 0;
2037 }
2038 
2039 int psp_ras_query_address(struct psp_context *psp,
2040 			  struct ta_ras_query_address_input *addr_in,
2041 			  struct ta_ras_query_address_output *addr_out)
2042 {
2043 	int ret;
2044 
2045 	if (!psp->ras_context.context.initialized ||
2046 		!addr_in || !addr_out)
2047 		return -EINVAL;
2048 
2049 	ret = psp_ras_send_cmd(psp,
2050 			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
2051 
2052 	return ret;
2053 }
2054 // ras end
2055 
2056 // HDCP start
2057 static int psp_hdcp_initialize(struct psp_context *psp)
2058 {
2059 	int ret;
2060 
2061 	/*
2062 	 * TODO: bypass the initialize in sriov for now
2063 	 */
2064 	if (amdgpu_sriov_vf(psp->adev))
2065 		return 0;
2066 
2067 	/* bypass hdcp initialization if dmu is harvested */
2068 	if (!amdgpu_device_has_display_hardware(psp->adev))
2069 		return 0;
2070 
2071 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
2072 	    !psp->hdcp_context.context.bin_desc.start_addr) {
2073 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
2074 		return 0;
2075 	}
2076 
2077 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
2078 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2079 
2080 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
2081 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
2082 		if (ret)
2083 			return ret;
2084 	}
2085 
2086 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
2087 	if (!ret) {
2088 		psp->hdcp_context.context.initialized = true;
2089 		mutex_init(&psp->hdcp_context.mutex);
2090 	}
2091 
2092 	return ret;
2093 }
2094 
2095 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2096 {
2097 	/*
2098 	 * TODO: bypass the loading in sriov for now
2099 	 */
2100 	if (amdgpu_sriov_vf(psp->adev))
2101 		return 0;
2102 
2103 	if (!psp->hdcp_context.context.initialized)
2104 		return 0;
2105 
2106 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
2107 }
2108 
2109 static int psp_hdcp_terminate(struct psp_context *psp)
2110 {
2111 	int ret;
2112 
2113 	/*
2114 	 * TODO: bypass the terminate in sriov for now
2115 	 */
2116 	if (amdgpu_sriov_vf(psp->adev))
2117 		return 0;
2118 
2119 	if (!psp->hdcp_context.context.initialized)
2120 		return 0;
2121 
2122 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2123 
2124 	psp->hdcp_context.context.initialized = false;
2125 
2126 	return ret;
2127 }
2128 // HDCP end
2129 
2130 // DTM start
2131 static int psp_dtm_initialize(struct psp_context *psp)
2132 {
2133 	int ret;
2134 
2135 	/*
2136 	 * TODO: bypass the initialize in sriov for now
2137 	 */
2138 	if (amdgpu_sriov_vf(psp->adev))
2139 		return 0;
2140 
2141 	/* bypass dtm initialization if dmu is harvested */
2142 	if (!amdgpu_device_has_display_hardware(psp->adev))
2143 		return 0;
2144 
2145 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
2146 	    !psp->dtm_context.context.bin_desc.start_addr) {
2147 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2148 		return 0;
2149 	}
2150 
2151 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2152 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2153 
2154 	if (!psp->dtm_context.context.mem_context.shared_buf) {
2155 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2156 		if (ret)
2157 			return ret;
2158 	}
2159 
2160 	ret = psp_ta_load(psp, &psp->dtm_context.context);
2161 	if (!ret) {
2162 		psp->dtm_context.context.initialized = true;
2163 		mutex_init(&psp->dtm_context.mutex);
2164 	}
2165 
2166 	return ret;
2167 }
2168 
2169 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2170 {
2171 	/*
2172 	 * TODO: bypass the loading in sriov for now
2173 	 */
2174 	if (amdgpu_sriov_vf(psp->adev))
2175 		return 0;
2176 
2177 	if (!psp->dtm_context.context.initialized)
2178 		return 0;
2179 
2180 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2181 }
2182 
2183 static int psp_dtm_terminate(struct psp_context *psp)
2184 {
2185 	int ret;
2186 
2187 	/*
2188 	 * TODO: bypass the terminate in sriov for now
2189 	 */
2190 	if (amdgpu_sriov_vf(psp->adev))
2191 		return 0;
2192 
2193 	if (!psp->dtm_context.context.initialized)
2194 		return 0;
2195 
2196 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
2197 
2198 	psp->dtm_context.context.initialized = false;
2199 
2200 	return ret;
2201 }
2202 // DTM end
2203 
2204 // RAP start
2205 static int psp_rap_initialize(struct psp_context *psp)
2206 {
2207 	int ret;
2208 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2209 
2210 	/*
2211 	 * TODO: bypass the initialize in sriov for now
2212 	 */
2213 	if (amdgpu_sriov_vf(psp->adev))
2214 		return 0;
2215 
2216 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2217 	    !psp->rap_context.context.bin_desc.start_addr) {
2218 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2219 		return 0;
2220 	}
2221 
2222 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2223 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2224 
2225 	if (!psp->rap_context.context.mem_context.shared_buf) {
2226 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2227 		if (ret)
2228 			return ret;
2229 	}
2230 
2231 	ret = psp_ta_load(psp, &psp->rap_context.context);
2232 	if (!ret) {
2233 		psp->rap_context.context.initialized = true;
2234 		mutex_init(&psp->rap_context.mutex);
2235 	} else
2236 		return ret;
2237 
2238 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2239 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2240 		psp_rap_terminate(psp);
2241 		/* free rap shared memory */
2242 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2243 
2244 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2245 			 ret, status);
2246 
2247 		return ret;
2248 	}
2249 
2250 	return 0;
2251 }
2252 
2253 static int psp_rap_terminate(struct psp_context *psp)
2254 {
2255 	int ret;
2256 
2257 	if (!psp->rap_context.context.initialized)
2258 		return 0;
2259 
2260 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2261 
2262 	psp->rap_context.context.initialized = false;
2263 
2264 	return ret;
2265 }
2266 
2267 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2268 {
2269 	struct ta_rap_shared_memory *rap_cmd;
2270 	int ret = 0;
2271 
2272 	if (!psp->rap_context.context.initialized)
2273 		return 0;
2274 
2275 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2276 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2277 		return -EINVAL;
2278 
2279 	mutex_lock(&psp->rap_context.mutex);
2280 
2281 	rap_cmd = (struct ta_rap_shared_memory *)
2282 		  psp->rap_context.context.mem_context.shared_buf;
2283 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2284 
2285 	rap_cmd->cmd_id = ta_cmd_id;
2286 	rap_cmd->validation_method_id = METHOD_A;
2287 
2288 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2289 	if (ret)
2290 		goto out_unlock;
2291 
2292 	if (status)
2293 		*status = rap_cmd->rap_status;
2294 
2295 out_unlock:
2296 	mutex_unlock(&psp->rap_context.mutex);
2297 
2298 	return ret;
2299 }
2300 // RAP end
2301 
2302 /* securedisplay start */
2303 static int psp_securedisplay_initialize(struct psp_context *psp)
2304 {
2305 	int ret;
2306 	struct ta_securedisplay_cmd *securedisplay_cmd;
2307 
2308 	/*
2309 	 * TODO: bypass the initialize in sriov for now
2310 	 */
2311 	if (amdgpu_sriov_vf(psp->adev))
2312 		return 0;
2313 
2314 	/* bypass securedisplay initialization if dmu is harvested */
2315 	if (!amdgpu_device_has_display_hardware(psp->adev))
2316 		return 0;
2317 
2318 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2319 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2320 		dev_info(psp->adev->dev,
2321 			 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
2322 		return 0;
2323 	}
2324 
2325 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2326 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2327 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2328 
2329 	if (!psp->securedisplay_context.context.initialized) {
2330 		ret = psp_ta_init_shared_buf(psp,
2331 					     &psp->securedisplay_context.context.mem_context);
2332 		if (ret)
2333 			return ret;
2334 	}
2335 
2336 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2337 	if (!ret) {
2338 		psp->securedisplay_context.context.initialized = true;
2339 		mutex_init(&psp->securedisplay_context.mutex);
2340 	} else
2341 		return ret;
2342 
2343 	mutex_lock(&psp->securedisplay_context.mutex);
2344 
2345 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2346 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2347 
2348 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2349 
2350 	mutex_unlock(&psp->securedisplay_context.mutex);
2351 
2352 	if (ret) {
2353 		psp_securedisplay_terminate(psp);
2354 		/* free securedisplay shared memory */
2355 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2356 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2357 		return -EINVAL;
2358 	}
2359 
2360 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2361 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2362 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2363 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2364 		/* don't try again */
2365 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2366 	}
2367 
2368 	return 0;
2369 }
2370 
2371 static int psp_securedisplay_terminate(struct psp_context *psp)
2372 {
2373 	int ret;
2374 
2375 	/*
2376 	 * TODO: bypass the terminate in sriov for now
2377 	 */
2378 	if (amdgpu_sriov_vf(psp->adev))
2379 		return 0;
2380 
2381 	if (!psp->securedisplay_context.context.initialized)
2382 		return 0;
2383 
2384 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2385 
2386 	psp->securedisplay_context.context.initialized = false;
2387 
2388 	return ret;
2389 }
2390 
2391 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2392 {
2393 	int ret;
2394 
2395 	if (!psp->securedisplay_context.context.initialized)
2396 		return -EINVAL;
2397 
2398 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2399 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2400 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2401 		return -EINVAL;
2402 
2403 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2404 
2405 	return ret;
2406 }
2407 /* SECUREDISPLAY end */
2408 
2409 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2410 {
2411 	struct psp_context *psp = &adev->psp;
2412 	int ret = 0;
2413 
2414 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2415 		ret = psp->funcs->wait_for_bootloader(psp);
2416 
2417 	return ret;
2418 }
2419 
2420 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2421 {
2422 	if (psp->funcs &&
2423 	    psp->funcs->get_ras_capability) {
2424 		return psp->funcs->get_ras_capability(psp);
2425 	} else {
2426 		return false;
2427 	}
2428 }
2429 
2430 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2431 {
2432 	struct psp_context *psp = &adev->psp;
2433 
2434 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2435 		return false;
2436 
2437 	if (psp->funcs && psp->funcs->is_reload_needed)
2438 		return psp->funcs->is_reload_needed(psp);
2439 
2440 	return false;
2441 }
2442 
2443 static void psp_update_gpu_addresses(struct amdgpu_device *adev)
2444 {
2445 	struct psp_context *psp = &adev->psp;
2446 
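	/* recompute the PSP buffer MC addresses from the frame buffer aperture
	 * (used when XGMI migration is enabled under virtualization)
	 */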
2447 	if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
2448 		psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
2449 		psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
2450 		psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
2451 	}
2452 	if (adev->firmware.rbuf && psp->km_ring.ring_mem)
2453 		psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
2454 }
2455 
2456 static int psp_hw_start(struct psp_context *psp)
2457 {
2458 	struct amdgpu_device *adev = psp->adev;
2459 	int ret;
2460 
2461 	if (amdgpu_virt_xgmi_migrate_enabled(adev))
2462 		psp_update_gpu_addresses(adev);
2463 
2464 	if (!amdgpu_sriov_vf(adev)) {
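		/* on bare metal, run the bootloader stages in order; each
		 * component is loaded only when its firmware is valid and the
		 * ASIC implements the matching callback
		 */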
2465 		if ((is_psp_fw_valid(psp->kdb)) &&
2466 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2467 			ret = psp_bootloader_load_kdb(psp);
2468 			if (ret) {
2469 				dev_err(adev->dev, "PSP load kdb failed!\n");
2470 				return ret;
2471 			}
2472 		}
2473 
2474 		if ((is_psp_fw_valid(psp->spl)) &&
2475 		    (psp->funcs->bootloader_load_spl != NULL)) {
2476 			ret = psp_bootloader_load_spl(psp);
2477 			if (ret) {
2478 				dev_err(adev->dev, "PSP load spl failed!\n");
2479 				return ret;
2480 			}
2481 		}
2482 
2483 		if ((is_psp_fw_valid(psp->sys)) &&
2484 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2485 			ret = psp_bootloader_load_sysdrv(psp);
2486 			if (ret) {
2487 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2488 				return ret;
2489 			}
2490 		}
2491 
2492 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2493 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2494 			ret = psp_bootloader_load_soc_drv(psp);
2495 			if (ret) {
2496 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2497 				return ret;
2498 			}
2499 		}
2500 
2501 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2502 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2503 			ret = psp_bootloader_load_intf_drv(psp);
2504 			if (ret) {
2505 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2506 				return ret;
2507 			}
2508 		}
2509 
2510 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2511 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2512 			ret = psp_bootloader_load_dbg_drv(psp);
2513 			if (ret) {
2514 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2515 				return ret;
2516 			}
2517 		}
2518 
2519 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2520 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2521 			ret = psp_bootloader_load_ras_drv(psp);
2522 			if (ret) {
2523 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2524 				return ret;
2525 			}
2526 		}
2527 
2528 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2529 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2530 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2531 			if (ret) {
2532 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2533 				return ret;
2534 			}
2535 		}
2536 
2537 		if ((is_psp_fw_valid(psp->spdm_drv)) &&
2538 		    (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2539 			ret = psp_bootloader_load_spdm_drv(psp);
2540 			if (ret) {
2541 				dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2542 				return ret;
2543 			}
2544 		}
2545 
2546 		if ((is_psp_fw_valid(psp->sos)) &&
2547 		    (psp->funcs->bootloader_load_sos != NULL)) {
2548 			ret = psp_bootloader_load_sos(psp);
2549 			if (ret) {
2550 				dev_err(adev->dev, "PSP load sos failed!\n");
2551 				return ret;
2552 			}
2553 		}
2554 	}
2555 
2556 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2557 	if (ret) {
2558 		dev_err(adev->dev, "PSP create ring failed!\n");
2559 		return ret;
2560 	}
2561 
2562 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2563 		ret = psp_update_fw_reservation(psp);
2564 		if (ret) {
2565 			dev_err(adev->dev, "update fw reservation failed!\n");
2566 			return ret;
2567 		}
2568 	}
2569 
2570 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2571 		goto skip_pin_bo;
2572 
2573 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2574 		ret = psp_tmr_init(psp);
2575 		if (ret) {
2576 			dev_err(adev->dev, "PSP tmr init failed!\n");
2577 			return ret;
2578 		}
2579 	}
2580 
2581 skip_pin_bo:
2582 	/*
2583 	 * For ASICs with DF Cstate management centralized
2584 	 * to PMFW, TMR setup should be performed after the PMFW
2585 	 * is loaded and before other non-PSP firmware is loaded.
2586 	 */
2587 	if (psp->pmfw_centralized_cstate_management) {
2588 		ret = psp_load_smu_fw(psp);
2589 		if (ret)
2590 			return ret;
2591 	}
2592 
2593 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2594 		ret = psp_tmr_load(psp);
2595 		if (ret) {
2596 			dev_err(adev->dev, "PSP load tmr failed!\n");
2597 			return ret;
2598 		}
2599 	}
2600 
2601 	return 0;
2602 }
2603 
2604 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2605 			   enum psp_gfx_fw_type *type)
2606 {
2607 	switch (ucode->ucode_id) {
2608 	case AMDGPU_UCODE_ID_CAP:
2609 		*type = GFX_FW_TYPE_CAP;
2610 		break;
2611 	case AMDGPU_UCODE_ID_SDMA0:
2612 		*type = GFX_FW_TYPE_SDMA0;
2613 		break;
2614 	case AMDGPU_UCODE_ID_SDMA1:
2615 		*type = GFX_FW_TYPE_SDMA1;
2616 		break;
2617 	case AMDGPU_UCODE_ID_SDMA2:
2618 		*type = GFX_FW_TYPE_SDMA2;
2619 		break;
2620 	case AMDGPU_UCODE_ID_SDMA3:
2621 		*type = GFX_FW_TYPE_SDMA3;
2622 		break;
2623 	case AMDGPU_UCODE_ID_SDMA4:
2624 		*type = GFX_FW_TYPE_SDMA4;
2625 		break;
2626 	case AMDGPU_UCODE_ID_SDMA5:
2627 		*type = GFX_FW_TYPE_SDMA5;
2628 		break;
2629 	case AMDGPU_UCODE_ID_SDMA6:
2630 		*type = GFX_FW_TYPE_SDMA6;
2631 		break;
2632 	case AMDGPU_UCODE_ID_SDMA7:
2633 		*type = GFX_FW_TYPE_SDMA7;
2634 		break;
2635 	case AMDGPU_UCODE_ID_CP_MES:
2636 		*type = GFX_FW_TYPE_CP_MES;
2637 		break;
2638 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2639 		*type = GFX_FW_TYPE_MES_STACK;
2640 		break;
2641 	case AMDGPU_UCODE_ID_CP_MES1:
2642 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2643 		break;
2644 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2645 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2646 		break;
2647 	case AMDGPU_UCODE_ID_CP_CE:
2648 		*type = GFX_FW_TYPE_CP_CE;
2649 		break;
2650 	case AMDGPU_UCODE_ID_CP_PFP:
2651 		*type = GFX_FW_TYPE_CP_PFP;
2652 		break;
2653 	case AMDGPU_UCODE_ID_CP_ME:
2654 		*type = GFX_FW_TYPE_CP_ME;
2655 		break;
2656 	case AMDGPU_UCODE_ID_CP_MEC1:
2657 		*type = GFX_FW_TYPE_CP_MEC;
2658 		break;
2659 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2660 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2661 		break;
2662 	case AMDGPU_UCODE_ID_CP_MEC2:
2663 		*type = GFX_FW_TYPE_CP_MEC;
2664 		break;
2665 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2666 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2667 		break;
2668 	case AMDGPU_UCODE_ID_RLC_P:
2669 		*type = GFX_FW_TYPE_RLC_P;
2670 		break;
2671 	case AMDGPU_UCODE_ID_RLC_V:
2672 		*type = GFX_FW_TYPE_RLC_V;
2673 		break;
2674 	case AMDGPU_UCODE_ID_RLC_G:
2675 		*type = GFX_FW_TYPE_RLC_G;
2676 		break;
2677 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2678 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2679 		break;
2680 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2681 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2682 		break;
2683 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2684 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2685 		break;
2686 	case AMDGPU_UCODE_ID_RLC_IRAM:
2687 		*type = GFX_FW_TYPE_RLC_IRAM;
2688 		break;
2689 	case AMDGPU_UCODE_ID_RLC_DRAM:
2690 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2691 		break;
2692 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2693 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2694 		break;
2695 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2696 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2697 		break;
2698 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2699 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2700 		break;
2701 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2702 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2703 		break;
2704 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2705 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2706 		break;
2707 	case AMDGPU_UCODE_ID_SMC:
2708 		*type = GFX_FW_TYPE_SMU;
2709 		break;
2710 	case AMDGPU_UCODE_ID_PPTABLE:
2711 		*type = GFX_FW_TYPE_PPTABLE;
2712 		break;
2713 	case AMDGPU_UCODE_ID_UVD:
2714 		*type = GFX_FW_TYPE_UVD;
2715 		break;
2716 	case AMDGPU_UCODE_ID_UVD1:
2717 		*type = GFX_FW_TYPE_UVD1;
2718 		break;
2719 	case AMDGPU_UCODE_ID_VCE:
2720 		*type = GFX_FW_TYPE_VCE;
2721 		break;
2722 	case AMDGPU_UCODE_ID_VCN:
2723 		*type = GFX_FW_TYPE_VCN;
2724 		break;
2725 	case AMDGPU_UCODE_ID_VCN1:
2726 		*type = GFX_FW_TYPE_VCN1;
2727 		break;
2728 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2729 		*type = GFX_FW_TYPE_DMCU_ERAM;
2730 		break;
2731 	case AMDGPU_UCODE_ID_DMCU_INTV:
2732 		*type = GFX_FW_TYPE_DMCU_ISR;
2733 		break;
2734 	case AMDGPU_UCODE_ID_VCN0_RAM:
2735 		*type = GFX_FW_TYPE_VCN0_RAM;
2736 		break;
2737 	case AMDGPU_UCODE_ID_VCN1_RAM:
2738 		*type = GFX_FW_TYPE_VCN1_RAM;
2739 		break;
2740 	case AMDGPU_UCODE_ID_DMCUB:
2741 		*type = GFX_FW_TYPE_DMUB;
2742 		break;
2743 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2744 	case AMDGPU_UCODE_ID_SDMA_RS64:
2745 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2746 		break;
2747 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2748 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2749 		break;
2750 	case AMDGPU_UCODE_ID_IMU_I:
2751 		*type = GFX_FW_TYPE_IMU_I;
2752 		break;
2753 	case AMDGPU_UCODE_ID_IMU_D:
2754 		*type = GFX_FW_TYPE_IMU_D;
2755 		break;
2756 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2757 		*type = GFX_FW_TYPE_RS64_PFP;
2758 		break;
2759 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2760 		*type = GFX_FW_TYPE_RS64_ME;
2761 		break;
2762 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2763 		*type = GFX_FW_TYPE_RS64_MEC;
2764 		break;
2765 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2766 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2767 		break;
2768 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2769 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2770 		break;
2771 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2772 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2773 		break;
2774 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2775 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2776 		break;
2777 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2778 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2779 		break;
2780 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2781 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2782 		break;
2783 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2784 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2785 		break;
2786 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2787 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2788 		break;
2789 	case AMDGPU_UCODE_ID_VPE_CTX:
2790 		*type = GFX_FW_TYPE_VPEC_FW1;
2791 		break;
2792 	case AMDGPU_UCODE_ID_VPE_CTL:
2793 		*type = GFX_FW_TYPE_VPEC_FW2;
2794 		break;
2795 	case AMDGPU_UCODE_ID_VPE:
2796 		*type = GFX_FW_TYPE_VPE;
2797 		break;
2798 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2799 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2800 		break;
2801 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2802 		*type = GFX_FW_TYPE_UMSCH_DATA;
2803 		break;
2804 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2805 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2806 		break;
2807 	case AMDGPU_UCODE_ID_P2S_TABLE:
2808 		*type = GFX_FW_TYPE_P2S_TABLE;
2809 		break;
2810 	case AMDGPU_UCODE_ID_JPEG_RAM:
2811 		*type = GFX_FW_TYPE_JPEG_RAM;
2812 		break;
2813 	case AMDGPU_UCODE_ID_ISP:
2814 		*type = GFX_FW_TYPE_ISP;
2815 		break;
2816 	case AMDGPU_UCODE_ID_MAXIMUM:
2817 	default:
2818 		return -EINVAL;
2819 	}
2820 
2821 	return 0;
2822 }
2823 
2824 static void psp_print_fw_hdr(struct psp_context *psp,
2825 			     struct amdgpu_firmware_info *ucode)
2826 {
2827 	struct amdgpu_device *adev = psp->adev;
2828 	struct common_firmware_header *hdr;
2829 
2830 	switch (ucode->ucode_id) {
2831 	case AMDGPU_UCODE_ID_SDMA0:
2832 	case AMDGPU_UCODE_ID_SDMA1:
2833 	case AMDGPU_UCODE_ID_SDMA2:
2834 	case AMDGPU_UCODE_ID_SDMA3:
2835 	case AMDGPU_UCODE_ID_SDMA4:
2836 	case AMDGPU_UCODE_ID_SDMA5:
2837 	case AMDGPU_UCODE_ID_SDMA6:
2838 	case AMDGPU_UCODE_ID_SDMA7:
2839 		hdr = (struct common_firmware_header *)
2840 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2841 		amdgpu_ucode_print_sdma_hdr(hdr);
2842 		break;
2843 	case AMDGPU_UCODE_ID_CP_CE:
2844 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2845 		amdgpu_ucode_print_gfx_hdr(hdr);
2846 		break;
2847 	case AMDGPU_UCODE_ID_CP_PFP:
2848 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2849 		amdgpu_ucode_print_gfx_hdr(hdr);
2850 		break;
2851 	case AMDGPU_UCODE_ID_CP_ME:
2852 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2853 		amdgpu_ucode_print_gfx_hdr(hdr);
2854 		break;
2855 	case AMDGPU_UCODE_ID_CP_MEC1:
2856 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2857 		amdgpu_ucode_print_gfx_hdr(hdr);
2858 		break;
2859 	case AMDGPU_UCODE_ID_RLC_G:
2860 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2861 		amdgpu_ucode_print_rlc_hdr(hdr);
2862 		break;
2863 	case AMDGPU_UCODE_ID_SMC:
2864 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2865 		amdgpu_ucode_print_smc_hdr(hdr);
2866 		break;
2867 	default:
2868 		break;
2869 	}
2870 }
2871 
2872 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2873 				       struct amdgpu_firmware_info *ucode,
2874 				       struct psp_gfx_cmd_resp *cmd)
2875 {
2876 	int ret;
2877 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2878 
2879 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2880 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2881 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2882 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2883 
2884 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2885 	if (ret)
2886 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2887 
2888 	return ret;
2889 }
2890 
2891 int psp_execute_ip_fw_load(struct psp_context *psp,
2892 			   struct amdgpu_firmware_info *ucode)
2893 {
2894 	int ret = 0;
2895 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2896 
2897 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2898 	if (!ret) {
2899 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2900 					 psp->fence_buf_mc_addr);
2901 	}
2902 
2903 	release_psp_cmd_buf(psp);
2904 
2905 	return ret;
2906 }
2907 
2908 static int psp_load_p2s_table(struct psp_context *psp)
2909 {
2910 	int ret;
2911 	struct amdgpu_device *adev = psp->adev;
2912 	struct amdgpu_firmware_info *ucode =
2913 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2914 
2915 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2916 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2917 		return 0;
2918 
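	/* MP0 v13.0.6/v13.0.14 require a minimum SOS firmware version before
	 * the P2S table can be loaded
	 */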
2919 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2920 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2921 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2922 								0x0036003C;
2923 		if (psp->sos.fw_version < supp_vers)
2924 			return 0;
2925 	}
2926 
2927 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2928 		return 0;
2929 
2930 	ret = psp_execute_ip_fw_load(psp, ucode);
2931 
2932 	return ret;
2933 }
2934 
2935 static int psp_load_smu_fw(struct psp_context *psp)
2936 {
2937 	int ret;
2938 	struct amdgpu_device *adev = psp->adev;
2939 	struct amdgpu_firmware_info *ucode =
2940 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2941 	struct amdgpu_ras *ras = psp->ras_context.ras;
2942 
2943 	/*
2944 	 * Skip SMU FW reloading when BACO is used for runtime PM only,
2945 	 * as the SMU is always alive.
2946 	 */
2947 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2948 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2949 		return 0;
2950 
2951 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2952 		return 0;
2953 
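	/* during a reset with RAS enabled on MP0 v11.0.2/v11.0.4, request MP1
	 * unload before reloading the SMU firmware
	 */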
2954 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2955 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2956 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2957 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2958 		if (ret)
2959 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2960 	}
2961 
2962 	ret = psp_execute_ip_fw_load(psp, ucode);
2963 
2964 	if (ret)
2965 		dev_err(adev->dev, "PSP load smu failed!\n");
2966 
2967 	return ret;
2968 }
2969 
2970 static bool fw_load_skip_check(struct psp_context *psp,
2971 			       struct amdgpu_firmware_info *ucode)
2972 {
2973 	if (!ucode->fw || !ucode->ucode_size)
2974 		return true;
2975 
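	/* the P2S table is loaded separately via psp_load_p2s_table() */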
2976 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2977 		return true;
2978 
2979 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2980 	    (psp_smu_reload_quirk(psp) ||
2981 	     psp->autoload_supported ||
2982 	     psp->pmfw_centralized_cstate_management))
2983 		return true;
2984 
2985 	if (amdgpu_sriov_vf(psp->adev) &&
2986 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2987 		return true;
2988 
2989 	if (psp->autoload_supported &&
2990 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2991 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2992 		/* skip mec JT when autoload is enabled */
2993 		return true;
2994 
2995 	return false;
2996 }
2997 
2998 int psp_load_fw_list(struct psp_context *psp,
2999 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
3000 {
3001 	int ret = 0, i;
3002 	struct amdgpu_firmware_info *ucode;
3003 
3004 	for (i = 0; i < ucode_count; ++i) {
3005 		ucode = ucode_list[i];
3006 		psp_print_fw_hdr(psp, ucode);
3007 		ret = psp_execute_ip_fw_load(psp, ucode);
3008 		if (ret)
3009 			return ret;
3010 	}
3011 	return ret;
3012 }
3013 
3014 static int psp_load_non_psp_fw(struct psp_context *psp)
3015 {
3016 	int i, ret;
3017 	struct amdgpu_firmware_info *ucode;
3018 	struct amdgpu_device *adev = psp->adev;
3019 
3020 	if (psp->autoload_supported &&
3021 	    !psp->pmfw_centralized_cstate_management) {
3022 		ret = psp_load_smu_fw(psp);
3023 		if (ret)
3024 			return ret;
3025 	}
3026 
3027 	/* Load P2S table first if it's available */
3028 	psp_load_p2s_table(psp);
3029 
3030 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
3031 		ucode = &adev->firmware.ucode[i];
3032 
3033 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3034 		    !fw_load_skip_check(psp, ucode)) {
3035 			ret = psp_load_smu_fw(psp);
3036 			if (ret)
3037 				return ret;
3038 			continue;
3039 		}
3040 
3041 		if (fw_load_skip_check(psp, ucode))
3042 			continue;
3043 
3044 		if (psp->autoload_supported &&
3045 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3046 			     IP_VERSION(11, 0, 7) ||
3047 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3048 			     IP_VERSION(11, 0, 11) ||
3049 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3050 			     IP_VERSION(11, 0, 12)) &&
3051 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
3052 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
3053 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
3054 			/* PSP only receives one SDMA firmware for sienna_cichlid,
3055 			 * as all four SDMA firmwares are the same
3056 			 */
3057 			continue;
3058 
3059 		psp_print_fw_hdr(psp, ucode);
3060 
3061 		ret = psp_execute_ip_fw_load(psp, ucode);
3062 		if (ret)
3063 			return ret;
3064 
3065 		/* Start RLC autoload after PSP has received all the GFX firmware */
3066 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
3067 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
3068 			ret = psp_rlc_autoload_start(psp);
3069 			if (ret) {
3070 				dev_err(adev->dev, "Failed to start rlc autoload\n");
3071 				return ret;
3072 			}
3073 		}
3074 	}
3075 
3076 	return 0;
3077 }
3078 
3079 static int psp_load_fw(struct amdgpu_device *adev)
3080 {
3081 	int ret;
3082 	struct psp_context *psp = &adev->psp;
3083 
3084 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3085 		/* should not destroy ring, only stop */
3086 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
3087 	} else {
3088 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
3089 
3090 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
3091 		if (ret) {
3092 			dev_err(adev->dev, "PSP ring init failed!\n");
3093 			goto failed;
3094 		}
3095 	}
3096 
3097 	ret = psp_hw_start(psp);
3098 	if (ret)
3099 		goto failed;
3100 
3101 	ret = psp_load_non_psp_fw(psp);
3102 	if (ret)
3103 		goto failed1;
3104 
3105 	ret = psp_asd_initialize(psp);
3106 	if (ret) {
3107 		dev_err(adev->dev, "PSP load asd failed!\n");
3108 		goto failed1;
3109 	}
3110 
3111 	ret = psp_rl_load(adev);
3112 	if (ret) {
3113 		dev_err(adev->dev, "PSP load RL failed!\n");
3114 		goto failed1;
3115 	}
3116 
3117 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3118 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
3119 			ret = psp_xgmi_initialize(psp, false, true);
3120 			/* Warn on XGMI session initialization failure
3121 			 * instead of stopping driver initialization
3122 			 */
3123 			if (ret)
3124 				dev_err(psp->adev->dev,
3125 					"XGMI: Failed to initialize XGMI session\n");
3126 		}
3127 	}
3128 
3129 	if (psp->ta_fw) {
3130 		ret = psp_ras_initialize(psp);
3131 		if (ret)
3132 			dev_err(psp->adev->dev,
3133 				"RAS: Failed to initialize RAS\n");
3134 
3135 		ret = psp_hdcp_initialize(psp);
3136 		if (ret)
3137 			dev_err(psp->adev->dev,
3138 				"HDCP: Failed to initialize HDCP\n");
3139 
3140 		ret = psp_dtm_initialize(psp);
3141 		if (ret)
3142 			dev_err(psp->adev->dev,
3143 				"DTM: Failed to initialize DTM\n");
3144 
3145 		ret = psp_rap_initialize(psp);
3146 		if (ret)
3147 			dev_err(psp->adev->dev,
3148 				"RAP: Failed to initialize RAP\n");
3149 
3150 		ret = psp_securedisplay_initialize(psp);
3151 		if (ret)
3152 			dev_err(psp->adev->dev,
3153 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3154 	}
3155 
3156 	return 0;
3157 
3158 failed1:
3159 	psp_free_shared_bufs(psp);
3160 failed:
3161 	/*
3162 	 * all cleanup jobs (xgmi terminate, ras terminate,
3163 	 * ring destroy, cmd/fence/fw buffers destroy,
3164 	 * psp->cmd destroy) are delayed to psp_hw_fini
3165 	 */
3166 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3167 	return ret;
3168 }
3169 
3170 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3171 {
3172 	int ret;
3173 	struct amdgpu_device *adev = ip_block->adev;
3174 
3175 	mutex_lock(&adev->firmware.mutex);
3176 
3177 	ret = amdgpu_ucode_init_bo(adev);
3178 	if (ret)
3179 		goto failed;
3180 
3181 	ret = psp_load_fw(adev);
3182 	if (ret) {
3183 		dev_err(adev->dev, "PSP firmware loading failed\n");
3184 		goto failed;
3185 	}
3186 
3187 	mutex_unlock(&adev->firmware.mutex);
3188 	return 0;
3189 
3190 failed:
3191 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3192 	mutex_unlock(&adev->firmware.mutex);
3193 	return -EINVAL;
3194 }
3195 
3196 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3197 {
3198 	struct amdgpu_device *adev = ip_block->adev;
3199 	struct psp_context *psp = &adev->psp;
3200 
3201 	if (psp->ta_fw) {
3202 		psp_ras_terminate(psp);
3203 		psp_securedisplay_terminate(psp);
3204 		psp_rap_terminate(psp);
3205 		psp_dtm_terminate(psp);
3206 		psp_hdcp_terminate(psp);
3207 
3208 		if (adev->gmc.xgmi.num_physical_nodes > 1)
3209 			psp_xgmi_terminate(psp);
3210 	}
3211 
3212 	psp_asd_terminate(psp);
3213 	psp_tmr_terminate(psp);
3214 
3215 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3216 
3217 	return 0;
3218 }
3219 
3220 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3221 {
3222 	int ret = 0;
3223 	struct amdgpu_device *adev = ip_block->adev;
3224 	struct psp_context *psp = &adev->psp;
3225 
3226 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3227 	    psp->xgmi_context.context.initialized) {
3228 		ret = psp_xgmi_terminate(psp);
3229 		if (ret) {
3230 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3231 			goto out;
3232 		}
3233 	}
3234 
3235 	if (psp->ta_fw) {
3236 		ret = psp_ras_terminate(psp);
3237 		if (ret) {
3238 			dev_err(adev->dev, "Failed to terminate ras ta\n");
3239 			goto out;
3240 		}
3241 		ret = psp_hdcp_terminate(psp);
3242 		if (ret) {
3243 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3244 			goto out;
3245 		}
3246 		ret = psp_dtm_terminate(psp);
3247 		if (ret) {
3248 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3249 			goto out;
3250 		}
3251 		ret = psp_rap_terminate(psp);
3252 		if (ret) {
3253 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3254 			goto out;
3255 		}
3256 		ret = psp_securedisplay_terminate(psp);
3257 		if (ret) {
3258 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3259 			goto out;
3260 		}
3261 	}
3262 
3263 	ret = psp_asd_terminate(psp);
3264 	if (ret) {
3265 		dev_err(adev->dev, "Failed to terminate asd\n");
3266 		goto out;
3267 	}
3268 
3269 	ret = psp_tmr_terminate(psp);
3270 	if (ret) {
3271 		dev_err(adev->dev, "Failed to terminate tmr\n");
3272 		goto out;
3273 	}
3274 
3275 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3276 	if (ret)
3277 		dev_err(adev->dev, "PSP ring stop failed\n");
3278 
3279 out:
3280 	return ret;
3281 }
3282 
3283 static int psp_resume(struct amdgpu_ip_block *ip_block)
3284 {
3285 	int ret;
3286 	struct amdgpu_device *adev = ip_block->adev;
3287 	struct psp_context *psp = &adev->psp;
3288 
3289 	dev_info(adev->dev, "PSP is resuming...\n");
3290 
3291 	if (psp->mem_train_ctx.enable_mem_training) {
3292 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3293 		if (ret) {
3294 			dev_err(adev->dev, "Failed to process memory training!\n");
3295 			return ret;
3296 		}
3297 	}
3298 
3299 	mutex_lock(&adev->firmware.mutex);
3300 
3301 	ret = amdgpu_ucode_init_bo(adev);
3302 	if (ret)
3303 		goto failed;
3304 
3305 	ret = psp_hw_start(psp);
3306 	if (ret)
3307 		goto failed;
3308 
3309 	ret = psp_load_non_psp_fw(psp);
3310 	if (ret)
3311 		goto failed;
3312 
3313 	ret = psp_asd_initialize(psp);
3314 	if (ret) {
3315 		dev_err(adev->dev, "PSP load asd failed!\n");
3316 		goto failed;
3317 	}
3318 
3319 	ret = psp_rl_load(adev);
3320 	if (ret) {
3321 		dev_err(adev->dev, "PSP load RL failed!\n");
3322 		goto failed;
3323 	}
3324 
3325 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3326 		ret = psp_xgmi_initialize(psp, false, true);
3327 		/* Warn on XGMI session initialization failure
3328 		 * instead of stopping driver initialization
3329 		 */
3330 		if (ret)
3331 			dev_err(psp->adev->dev,
3332 				"XGMI: Failed to initialize XGMI session\n");
3333 	}
3334 
3335 	if (psp->ta_fw) {
3336 		ret = psp_ras_initialize(psp);
3337 		if (ret)
3338 			dev_err(psp->adev->dev,
3339 				"RAS: Failed to initialize RAS\n");
3340 
3341 		ret = psp_hdcp_initialize(psp);
3342 		if (ret)
3343 			dev_err(psp->adev->dev,
3344 				"HDCP: Failed to initialize HDCP\n");
3345 
3346 		ret = psp_dtm_initialize(psp);
3347 		if (ret)
3348 			dev_err(psp->adev->dev,
3349 				"DTM: Failed to initialize DTM\n");
3350 
3351 		ret = psp_rap_initialize(psp);
3352 		if (ret)
3353 			dev_err(psp->adev->dev,
3354 				"RAP: Failed to initialize RAP\n");
3355 
3356 		ret = psp_securedisplay_initialize(psp);
3357 		if (ret)
3358 			dev_err(psp->adev->dev,
3359 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3360 	}
3361 
3362 	mutex_unlock(&adev->firmware.mutex);
3363 
3364 	return 0;
3365 
3366 failed:
3367 	dev_err(adev->dev, "PSP resume failed\n");
3368 	mutex_unlock(&adev->firmware.mutex);
3369 	return ret;
3370 }
3371 
3372 int psp_gpu_reset(struct amdgpu_device *adev)
3373 {
3374 	int ret;
3375 
3376 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3377 		return 0;
3378 
3379 	mutex_lock(&adev->psp.mutex);
3380 	ret = psp_mode1_reset(&adev->psp);
3381 	mutex_unlock(&adev->psp.mutex);
3382 
3383 	return ret;
3384 }
3385 
3386 int psp_rlc_autoload_start(struct psp_context *psp)
3387 {
3388 	int ret;
3389 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3390 
3391 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3392 
3393 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3394 				 psp->fence_buf_mc_addr);
3395 
3396 	release_psp_cmd_buf(psp);
3397 
3398 	return ret;
3399 }
3400 
3401 int psp_ring_cmd_submit(struct psp_context *psp,
3402 			uint64_t cmd_buf_mc_addr,
3403 			uint64_t fence_mc_addr,
3404 			int index)
3405 {
3406 	unsigned int psp_write_ptr_reg = 0;
3407 	struct psp_gfx_rb_frame *write_frame;
3408 	struct psp_ring *ring = &psp->km_ring;
3409 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3410 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3411 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3412 	struct amdgpu_device *adev = psp->adev;
3413 	uint32_t ring_size_dw = ring->ring_size / 4;
3414 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3415 
3416 	/* KM (GPCOM) prepare write pointer */
3417 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3418 
3419 	/* Update KM RB frame pointer to new frame */
3420 	/* write_frame ptr increments by size of rb_frame in bytes */
3421 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3422 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3423 		write_frame = ring_buffer_start;
3424 	else
3425 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3426 	/* Check for an invalid write_frame pointer address */
3427 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3428 		dev_err(adev->dev,
3429 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3430 			ring_buffer_start, ring_buffer_end, write_frame);
3431 		dev_err(adev->dev,
3432 			"write_frame is pointing to address out of bounds\n");
3433 		return -EINVAL;
3434 	}
3435 
3436 	/* Initialize KM RB frame */
3437 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3438 
3439 	/* Update KM RB frame */
3440 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3441 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3442 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3443 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3444 	write_frame->fence_value = index;
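	/* flush HDP so the PSP sees the completed frame before the write
	 * pointer is advanced
	 */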
3445 	amdgpu_device_flush_hdp(adev, NULL);
3446 
3447 	/* Update the write Pointer in DWORDs */
3448 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3449 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3450 	return 0;
3451 }
3452 
3453 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3454 {
3455 	struct amdgpu_device *adev = psp->adev;
3456 	const struct psp_firmware_header_v1_0 *asd_hdr;
3457 	int err = 0;
3458 
3459 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3460 				   "amdgpu/%s_asd.bin", chip_name);
3461 	if (err)
3462 		goto out;
3463 
3464 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3465 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3466 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3467 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3468 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3469 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3470 	return 0;
3471 out:
3472 	amdgpu_ucode_release(&adev->psp.asd_fw);
3473 	return err;
3474 }
3475 
3476 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3477 {
3478 	struct amdgpu_device *adev = psp->adev;
3479 	const struct psp_firmware_header_v1_0 *toc_hdr;
3480 	int err = 0;
3481 
3482 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3483 				   "amdgpu/%s_toc.bin", chip_name);
3484 	if (err)
3485 		goto out;
3486 
3487 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3488 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3489 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3490 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3491 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3492 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3493 	return 0;
3494 out:
3495 	amdgpu_ucode_release(&adev->psp.toc_fw);
3496 	return err;
3497 }
3498 
3499 static int parse_sos_bin_descriptor(struct psp_context *psp,
3500 				   const struct psp_fw_bin_desc *desc,
3501 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3502 {
3503 	uint8_t *ucode_start_addr  = NULL;
3504 
3505 	if (!psp || !desc || !sos_hdr)
3506 		return -EINVAL;
3507 
3508 	ucode_start_addr  = (uint8_t *)sos_hdr +
3509 			    le32_to_cpu(desc->offset_bytes) +
3510 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3511 
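	/* each descriptor identifies one component packed in the SOS image;
	 * record its version, size and start address
	 */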
3512 	switch (desc->fw_type) {
3513 	case PSP_FW_TYPE_PSP_SOS:
3514 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3515 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3516 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3517 		psp->sos.start_addr	   = ucode_start_addr;
3518 		break;
3519 	case PSP_FW_TYPE_PSP_SYS_DRV:
3520 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3521 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3522 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3523 		psp->sys.start_addr        = ucode_start_addr;
3524 		break;
3525 	case PSP_FW_TYPE_PSP_KDB:
3526 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3527 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3528 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3529 		psp->kdb.start_addr        = ucode_start_addr;
3530 		break;
3531 	case PSP_FW_TYPE_PSP_TOC:
3532 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3533 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3534 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3535 		psp->toc.start_addr        = ucode_start_addr;
3536 		break;
3537 	case PSP_FW_TYPE_PSP_SPL:
3538 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3539 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3540 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3541 		psp->spl.start_addr        = ucode_start_addr;
3542 		break;
3543 	case PSP_FW_TYPE_PSP_RL:
3544 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3545 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3546 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3547 		psp->rl.start_addr         = ucode_start_addr;
3548 		break;
3549 	case PSP_FW_TYPE_PSP_SOC_DRV:
3550 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3551 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3552 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3553 		psp->soc_drv.start_addr         = ucode_start_addr;
3554 		break;
3555 	case PSP_FW_TYPE_PSP_INTF_DRV:
3556 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3557 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3558 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3559 		psp->intf_drv.start_addr        = ucode_start_addr;
3560 		break;
3561 	case PSP_FW_TYPE_PSP_DBG_DRV:
3562 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3563 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3564 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3565 		psp->dbg_drv.start_addr         = ucode_start_addr;
3566 		break;
3567 	case PSP_FW_TYPE_PSP_RAS_DRV:
3568 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3569 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3570 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3571 		psp->ras_drv.start_addr         = ucode_start_addr;
3572 		break;
3573 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3574 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3575 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3576 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3577 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3578 		break;
3579 	case PSP_FW_TYPE_PSP_SPDM_DRV:
3580 		psp->spdm_drv.fw_version	= le32_to_cpu(desc->fw_version);
3581 		psp->spdm_drv.feature_version	= le32_to_cpu(desc->fw_version);
3582 		psp->spdm_drv.size_bytes	= le32_to_cpu(desc->size_bytes);
3583 		psp->spdm_drv.start_addr	= ucode_start_addr;
3584 		break;
3585 	default:
3586 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3587 		break;
3588 	}
3589 
3590 	return 0;
3591 }
3592 
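/*
 * Parse a legacy (header v1.x) SOS image: set up the SYS_DRV and SOS
 * descriptors, using the aux variants on MP0 v13.0.2 parts that are not
 * connected to the CPU.
 */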
3593 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3594 {
3595 	const struct psp_firmware_header_v1_0 *sos_hdr;
3596 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3597 	uint8_t *ucode_array_start_addr;
3598 
3599 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3600 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3601 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3602 
3603 	if (adev->gmc.xgmi.connected_to_cpu ||
3604 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3605 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3606 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3607 
3608 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3609 		adev->psp.sys.start_addr = ucode_array_start_addr;
3610 
3611 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3612 		adev->psp.sos.start_addr = ucode_array_start_addr +
3613 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3614 	} else {
3615 		/* Load alternate PSP SOS FW */
3616 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3617 
3618 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3619 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3620 
3621 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3622 		adev->psp.sys.start_addr = ucode_array_start_addr +
3623 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3624 
3625 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3626 		adev->psp.sos.start_addr = ucode_array_start_addr +
3627 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3628 	}
3629 
3630 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3631 		dev_warn(adev->dev, "PSP SOS FW not available\n");
3632 		return -EINVAL;
3633 	}
3634 
3635 	return 0;
3636 }
3637 
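/*
 * Request the SOS firmware image for @chip_name and parse it according to its
 * header version: v1.x via psp_init_sos_base_fw() plus the per-minor extras,
 * v2.x by walking the packed binary descriptors.
 */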
3638 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3639 {
3640 	struct amdgpu_device *adev = psp->adev;
3641 	const struct psp_firmware_header_v1_0 *sos_hdr;
3642 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3643 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3644 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3645 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3646 	const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3647 	int fw_index, fw_bin_count, start_index = 0;
3648 	const struct psp_fw_bin_desc *fw_bin;
3649 	uint8_t *ucode_array_start_addr;
3650 	int err = 0;
3651 
3652 	if (amdgpu_is_kicker_fw(adev))
3653 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3654 					   "amdgpu/%s_sos_kicker.bin", chip_name);
3655 	else
3656 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3657 					   "amdgpu/%s_sos.bin", chip_name);
3658 	if (err)
3659 		goto out;
3660 
3661 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3662 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3663 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3664 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3665 
3666 	switch (sos_hdr->header.header_version_major) {
3667 	case 1:
3668 		err = psp_init_sos_base_fw(adev);
3669 		if (err)
3670 			goto out;
3671 
3672 		if (sos_hdr->header.header_version_minor == 1) {
3673 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3674 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3675 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3676 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3677 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3678 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3679 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3680 		}
3681 		if (sos_hdr->header.header_version_minor == 2) {
3682 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3683 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3684 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3685 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3686 		}
3687 		if (sos_hdr->header.header_version_minor == 3) {
3688 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3689 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3690 			adev->psp.toc.start_addr = ucode_array_start_addr +
3691 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3692 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3693 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3694 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3695 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3696 			adev->psp.spl.start_addr = ucode_array_start_addr +
3697 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3698 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3699 			adev->psp.rl.start_addr = ucode_array_start_addr +
3700 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3701 		}
3702 		break;
3703 	case 2:
3704 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3705 
3706 		fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3707 
3708 		if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3709 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3710 			err = -EINVAL;
3711 			goto out;
3712 		}
3713 
3714 		if (sos_hdr_v2_0->header.header_version_minor == 1) {
3715 			sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3716 
3717 			fw_bin = sos_hdr_v2_1->psp_fw_bin;
3718 
3719 			if (psp_is_aux_sos_load_required(psp))
3720 				start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3721 			else
3722 				fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3723 
3724 		} else {
3725 			fw_bin = sos_hdr_v2_0->psp_fw_bin;
3726 		}
3727 
3728 		for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3729 			err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3730 						       sos_hdr_v2_0);
3731 			if (err)
3732 				goto out;
3733 		}
3734 		break;
3735 	default:
3736 		dev_err(adev->dev,
3737 			"unsupported psp sos firmware\n");
3738 		err = -EINVAL;
3739 		goto out;
3740 	}
3741 
3742 	return 0;
3743 out:
3744 	amdgpu_ucode_release(&adev->psp.sos_fw);
3745 
3746 	return err;
3747 }
3748 
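/*
 * Decide whether a packed TA binary applies to this device. On MP0 v13.0.6
 * this selects between the regular and AUX XGMI TA (AUX is used on APUs with
 * a new enough TA release); every other TA type is always applicable.
 */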
3749 static bool is_ta_fw_applicable(struct psp_context *psp,
3750 			     const struct psp_fw_bin_desc *desc)
3751 {
3752 	struct amdgpu_device *adev = psp->adev;
3753 	uint32_t fw_version;
3754 
3755 	switch (desc->fw_type) {
3756 	case TA_FW_TYPE_PSP_XGMI:
3757 	case TA_FW_TYPE_PSP_XGMI_AUX:
3758 		/* For now, the AUX TA only exists in the 13.0.6 TA binary,
3759 		 * starting from v20.00.0x.14.
3760 		 */
3761 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3762 		    IP_VERSION(13, 0, 6)) {
3763 			fw_version = le32_to_cpu(desc->fw_version);
3764 
3765 			if (adev->flags & AMD_IS_APU &&
3766 			    (fw_version & 0xff) >= 0x14)
3767 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3768 			else
3769 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3770 		}
3771 		break;
3772 	default:
3773 		break;
3774 	}
3775 
3776 	return true;
3777 }
3778 
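/*
 * Fill in the matching TA context descriptor (ASD, XGMI, RAS, HDCP, DTM, RAP
 * or SECUREDISPLAY) from one binary descriptor of a packed (header v2.x) TA
 * image.
 */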
3779 static int parse_ta_bin_descriptor(struct psp_context *psp,
3780 				   const struct psp_fw_bin_desc *desc,
3781 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3782 {
3783 	uint8_t *ucode_start_addr  = NULL;
3784 
3785 	if (!psp || !desc || !ta_hdr)
3786 		return -EINVAL;
3787 
3788 	if (!is_ta_fw_applicable(psp, desc))
3789 		return 0;
3790 
3791 	ucode_start_addr  = (uint8_t *)ta_hdr +
3792 			    le32_to_cpu(desc->offset_bytes) +
3793 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3794 
3795 	switch (desc->fw_type) {
3796 	case TA_FW_TYPE_PSP_ASD:
3797 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3798 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3799 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3800 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3801 		break;
3802 	case TA_FW_TYPE_PSP_XGMI:
3803 	case TA_FW_TYPE_PSP_XGMI_AUX:
3804 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3805 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3806 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3807 		break;
3808 	case TA_FW_TYPE_PSP_RAS:
3809 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3810 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3811 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3812 		break;
3813 	case TA_FW_TYPE_PSP_HDCP:
3814 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3815 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3816 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3817 		break;
3818 	case TA_FW_TYPE_PSP_DTM:
3819 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3820 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3821 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3822 		break;
3823 	case TA_FW_TYPE_PSP_RAP:
3824 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3825 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3826 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3827 		break;
3828 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3829 		psp->securedisplay_context.context.bin_desc.fw_version =
3830 			le32_to_cpu(desc->fw_version);
3831 		psp->securedisplay_context.context.bin_desc.size_bytes =
3832 			le32_to_cpu(desc->size_bytes);
3833 		psp->securedisplay_context.context.bin_desc.start_addr =
3834 			ucode_start_addr;
3835 		break;
3836 	default:
3837 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3838 		break;
3839 	}
3840 
3841 	return 0;
3842 }
3843 
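/*
 * Parse a v1 TA image, where the XGMI, RAS, HDCP, DTM and SECUREDISPLAY blobs
 * sit at fixed offsets described by the header.
 */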
3844 static int parse_ta_v1_microcode(struct psp_context *psp)
3845 {
3846 	const struct ta_firmware_header_v1_0 *ta_hdr;
3847 	struct amdgpu_device *adev = psp->adev;
3848 
3849 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3850 
3851 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3852 		return -EINVAL;
3853 
3854 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3855 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3856 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3857 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3858 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3859 		(uint8_t *)ta_hdr +
3860 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3861 
3862 	adev->psp.ras_context.context.bin_desc.fw_version =
3863 		le32_to_cpu(ta_hdr->ras.fw_version);
3864 	adev->psp.ras_context.context.bin_desc.size_bytes =
3865 		le32_to_cpu(ta_hdr->ras.size_bytes);
3866 	adev->psp.ras_context.context.bin_desc.start_addr =
3867 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3868 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3869 
3870 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3871 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3872 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3873 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3874 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3875 		(uint8_t *)ta_hdr +
3876 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3877 
3878 	adev->psp.dtm_context.context.bin_desc.fw_version =
3879 		le32_to_cpu(ta_hdr->dtm.fw_version);
3880 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3881 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3882 	adev->psp.dtm_context.context.bin_desc.start_addr =
3883 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3884 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3885 
3886 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3887 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3888 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3889 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3890 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3891 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3892 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3893 
3894 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3895 
3896 	return 0;
3897 }
3898 
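/* Parse a v2 TA image by walking its array of packed binary descriptors. */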
3899 static int parse_ta_v2_microcode(struct psp_context *psp)
3900 {
3901 	const struct ta_firmware_header_v2_0 *ta_hdr;
3902 	struct amdgpu_device *adev = psp->adev;
3903 	int err = 0;
3904 	int ta_index = 0;
3905 
3906 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3907 
3908 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3909 		return -EINVAL;
3910 
3911 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3912 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3913 		return -EINVAL;
3914 	}
3915 
3916 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3917 		err = parse_ta_bin_descriptor(psp,
3918 					      &ta_hdr->ta_fw_bin[ta_index],
3919 					      ta_hdr);
3920 		if (err)
3921 			return err;
3922 	}
3923 
3924 	return 0;
3925 }
3926 
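/*
 * Request the TA firmware image for @chip_name and parse it according to its
 * header version.
 */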
3927 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3928 {
3929 	const struct common_firmware_header *hdr;
3930 	struct amdgpu_device *adev = psp->adev;
3931 	int err;
3932 
3933 	if (amdgpu_is_kicker_fw(adev))
3934 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3935 					   "amdgpu/%s_ta_kicker.bin", chip_name);
3936 	else
3937 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3938 					   "amdgpu/%s_ta.bin", chip_name);
3939 	if (err)
3940 		return err;
3941 
3942 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3943 	switch (le16_to_cpu(hdr->header_version_major)) {
3944 	case 1:
3945 		err = parse_ta_v1_microcode(psp);
3946 		break;
3947 	case 2:
3948 		err = parse_ta_v2_microcode(psp);
3949 		break;
3950 	default:
3951 		dev_err(adev->dev, "unsupported TA header version\n");
3952 		err = -EINVAL;
3953 	}
3954 
3955 	if (err)
3956 		amdgpu_ucode_release(&adev->psp.ta_fw);
3957 
3958 	return err;
3959 }
3960 
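/*
 * Request the optional CAP firmware image for @chip_name (SRIOV only) and
 * register it with the firmware framework; a missing image is not treated as
 * an error.
 */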
3961 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3962 {
3963 	struct amdgpu_device *adev = psp->adev;
3964 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3965 	struct amdgpu_firmware_info *info = NULL;
3966 	int err = 0;
3967 
3968 	if (!amdgpu_sriov_vf(adev)) {
3969 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3970 		return -EINVAL;
3971 	}
3972 
3973 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
3974 				   "amdgpu/%s_cap.bin", chip_name);
3975 	if (err) {
3976 		if (err == -ENODEV) {
3977 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3978 			err = 0;
3979 		} else {
3980 			dev_err(adev->dev, "fail to initialize cap microcode\n");
3981 		}
3982 		goto out;
3983 	}
3984 
3985 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3986 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
3987 	info->fw = adev->psp.cap_fw;
3988 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3989 		adev->psp.cap_fw->data;
3990 	adev->firmware.fw_size += ALIGN(
3991 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3992 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3993 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3994 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3995 
3996 	return 0;
3997 
3998 out:
3999 	amdgpu_ucode_release(&adev->psp.cap_fw);
4000 	return err;
4001 }
4002 
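/*
 * Send a GFX_CMD_ID_CONFIG_SQ_PERFMON command to the PSP to program the SQ
 * core/register/perfmon override bits for one XCP. Only supported on
 * bare-metal MP0 v13.0.6; a no-op under SRIOV.
 */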
4003 int psp_config_sq_perfmon(struct psp_context *psp,
4004 		uint32_t xcp_id, bool core_override_enable,
4005 		bool reg_override_enable, bool perfmon_override_enable)
4006 {
4007 	struct psp_gfx_cmd_resp *cmd;
	int ret;
4008 
4009 	if (amdgpu_sriov_vf(psp->adev))
4010 		return 0;
4011 
4012 	if (xcp_id > MAX_XCP) {
4013 		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
4014 		return -EINVAL;
4015 	}
4016 
4017 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
4018 		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
4019 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
4020 		return -EINVAL;
4021 	}
4022 	cmd = acquire_psp_cmd_buf(psp);
4023 
4024 	cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
4025 	cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
4026 	cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
4027 	cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
4028 	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
4029 
4030 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
4031 	if (ret)
4032 		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
4033 			xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
4034 
4035 	release_psp_cmd_buf(psp);
4036 	return ret;
4037 }
4038 
4039 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
4040 					enum amd_clockgating_state state)
4041 {
4042 	return 0;
4043 }
4044 
4045 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
4046 				     enum amd_powergating_state state)
4047 {
4048 	return 0;
4049 }
4050 
4051 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
4052 					 struct device_attribute *attr,
4053 					 char *buf)
4054 {
4055 	struct drm_device *ddev = dev_get_drvdata(dev);
4056 	struct amdgpu_device *adev = drm_to_adev(ddev);
4057 	struct amdgpu_ip_block *ip_block;
4058 	uint32_t fw_ver;
4059 	int ret;
4060 
4061 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4062 	if (!ip_block || !ip_block->status.late_initialized) {
4063 		dev_info(adev->dev, "PSP block is not ready yet.\n");
4064 		return -EBUSY;
4065 	}
4066 
4067 	mutex_lock(&adev->psp.mutex);
4068 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
4069 	mutex_unlock(&adev->psp.mutex);
4070 
4071 	if (ret) {
4072 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
4073 		return ret;
4074 	}
4075 
4076 	return sysfs_emit(buf, "%x\n", fw_ver);
4077 }
4078 
4079 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
4080 						       struct device_attribute *attr,
4081 						       const char *buf,
4082 						       size_t count)
4083 {
4084 	struct drm_device *ddev = dev_get_drvdata(dev);
4085 	struct amdgpu_device *adev = drm_to_adev(ddev);
4086 	int ret, idx;
4087 	const struct firmware *usbc_pd_fw;
4088 	struct amdgpu_bo *fw_buf_bo = NULL;
4089 	uint64_t fw_pri_mc_addr;
4090 	void *fw_pri_cpu_addr;
4091 	struct amdgpu_ip_block *ip_block;
4092 
4093 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4094 	if (!ip_block || !ip_block->status.late_initialized) {
4095 		dev_err(adev->dev, "PSP block is not ready yet.\n");
4096 		return -EBUSY;
4097 	}
4098 
4099 	if (!drm_dev_enter(ddev, &idx))
4100 		return -ENODEV;
4101 
4102 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
4103 				   "amdgpu/%s", buf);
4104 	if (ret)
4105 		goto fail;
4106 
4107 	/* LFB address must be aligned to a 1MB boundary, per PSP requirement */
4108 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
4109 				      AMDGPU_GEM_DOMAIN_VRAM |
4110 				      AMDGPU_GEM_DOMAIN_GTT,
4111 				      &fw_buf_bo, &fw_pri_mc_addr,
4112 				      &fw_pri_cpu_addr);
4113 	if (ret)
4114 		goto rel_buf;
4115 
4116 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
4117 
4118 	mutex_lock(&adev->psp.mutex);
4119 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
4120 	mutex_unlock(&adev->psp.mutex);
4121 
4122 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4123 
4124 rel_buf:
4125 	amdgpu_ucode_release(&usbc_pd_fw);
4126 fail:
4127 	if (ret) {
4128 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
4129 		count = ret;
4130 	}
4131 
4132 	drm_dev_exit(idx);
4133 	return count;
4134 }
4135 
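/*
 * Copy a firmware binary into the PSP private firmware (fw_pri) staging
 * buffer.
 */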
4136 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
4137 {
4138 	int idx;
4139 
4140 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
4141 		return;
4142 
4143 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
4144 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
4145 
4146 	drm_dev_exit(idx);
4147 }
4148 
4149 /**
4150  * DOC: usbc_pd_fw
4151  * Reading from this file will retrieve the USB-C PD firmware version. Writing to
4152  * this file will trigger the update process.
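 *
 * Illustrative usage from userspace (the sysfs path is only an example, the
 * card index varies; the written name must refer to a PD firmware file under
 * /lib/firmware/amdgpu/):
 *
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *   echo usbc_pd_fw_image.bin > /sys/class/drm/card0/device/usbc_pd_fw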
4153  */
4154 static DEVICE_ATTR(usbc_pd_fw, 0644,
4155 		   psp_usbc_pd_fw_sysfs_read,
4156 		   psp_usbc_pd_fw_sysfs_write);
4157 
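/* A firmware descriptor is considered valid if it has a non-zero size. */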
4158 int is_psp_fw_valid(struct psp_bin_desc bin)
4159 {
4160 	return bin.size_bytes;
4161 }
4162 
4163 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4164 					const struct bin_attribute *bin_attr,
4165 					char *buffer, loff_t pos, size_t count)
4166 {
4167 	struct device *dev = kobj_to_dev(kobj);
4168 	struct drm_device *ddev = dev_get_drvdata(dev);
4169 	struct amdgpu_device *adev = drm_to_adev(ddev);
4170 
4171 	adev->psp.vbflash_done = false;
4172 
4173 	/* Safeguard against memory drain */
4174 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
4175 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4176 		kvfree(adev->psp.vbflash_tmp_buf);
4177 		adev->psp.vbflash_tmp_buf = NULL;
4178 		adev->psp.vbflash_image_size = 0;
4179 		return -ENOMEM;
4180 	}
4181 
4182 	/* TODO Just allocate max for now and optimize to realloc later if needed */
4183 	if (!adev->psp.vbflash_tmp_buf) {
4184 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4185 		if (!adev->psp.vbflash_tmp_buf)
4186 			return -ENOMEM;
4187 	}
4188 
4189 	mutex_lock(&adev->psp.mutex);
4190 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4191 	adev->psp.vbflash_image_size += count;
4192 	mutex_unlock(&adev->psp.mutex);
4193 
4194 	dev_dbg(adev->dev, "IFWI staged for update\n");
4195 
4196 	return count;
4197 }
4198 
4199 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4200 				       const struct bin_attribute *bin_attr, char *buffer,
4201 				       loff_t pos, size_t count)
4202 {
4203 	struct device *dev = kobj_to_dev(kobj);
4204 	struct drm_device *ddev = dev_get_drvdata(dev);
4205 	struct amdgpu_device *adev = drm_to_adev(ddev);
4206 	struct amdgpu_bo *fw_buf_bo = NULL;
4207 	uint64_t fw_pri_mc_addr;
4208 	void *fw_pri_cpu_addr;
4209 	int ret;
4210 
4211 	if (adev->psp.vbflash_image_size == 0)
4212 		return -EINVAL;
4213 
4214 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4215 
4216 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4217 					AMDGPU_GPU_PAGE_SIZE,
4218 					AMDGPU_GEM_DOMAIN_VRAM,
4219 					&fw_buf_bo,
4220 					&fw_pri_mc_addr,
4221 					&fw_pri_cpu_addr);
4222 	if (ret)
4223 		goto rel_buf;
4224 
4225 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4226 
4227 	mutex_lock(&adev->psp.mutex);
4228 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4229 	mutex_unlock(&adev->psp.mutex);
4230 
4231 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4232 
4233 rel_buf:
4234 	kvfree(adev->psp.vbflash_tmp_buf);
4235 	adev->psp.vbflash_tmp_buf = NULL;
4236 	adev->psp.vbflash_image_size = 0;
4237 
4238 	if (ret) {
4239 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4240 		return ret;
4241 	}
4242 
4243 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4244 	return 0;
4245 }
4246 
4247 /**
4248  * DOC: psp_vbflash
4249  * Writing to this file will stage an IFWI for update. Reading from this file
4250  * will trigger the update process.
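 *
 * Illustrative usage from userspace (the sysfs path and image name are only
 * examples):
 *
 *   cat new_ifwi.bin > /sys/class/drm/card0/device/psp_vbflash
 *   cat /sys/class/drm/card0/device/psp_vbflash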
4251  */
4252 static const struct bin_attribute psp_vbflash_bin_attr = {
4253 	.attr = {.name = "psp_vbflash", .mode = 0660},
4254 	.size = 0,
4255 	.write_new = amdgpu_psp_vbflash_write,
4256 	.read_new = amdgpu_psp_vbflash_read,
4257 };
4258 
4259 /**
4260  * DOC: psp_vbflash_status
4261  * The status of the flash process.
4262  * 0: IFWI flash not complete.
4263  * 1: IFWI flash complete.
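 *
 * For example (path is illustrative):
 *
 *   cat /sys/class/drm/card0/device/psp_vbflash_status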
4264  */
4265 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4266 					 struct device_attribute *attr,
4267 					 char *buf)
4268 {
4269 	struct drm_device *ddev = dev_get_drvdata(dev);
4270 	struct amdgpu_device *adev = drm_to_adev(ddev);
4271 	uint32_t vbflash_status;
4272 
4273 	vbflash_status = psp_vbflash_status(&adev->psp);
4274 	if (!adev->psp.vbflash_done)
4275 		vbflash_status = 0;
4276 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4277 		vbflash_status = 1;
4278 
4279 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
4280 }
4281 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4282 
4283 static const struct bin_attribute *const bin_flash_attrs[] = {
4284 	&psp_vbflash_bin_attr,
4285 	NULL
4286 };
4287 
4288 static struct attribute *flash_attrs[] = {
4289 	&dev_attr_psp_vbflash_status.attr,
4290 	&dev_attr_usbc_pd_fw.attr,
4291 	NULL
4292 };
4293 
4294 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4295 {
4296 	struct device *dev = kobj_to_dev(kobj);
4297 	struct drm_device *ddev = dev_get_drvdata(dev);
4298 	struct amdgpu_device *adev = drm_to_adev(ddev);
4299 
4300 	if (attr == &dev_attr_usbc_pd_fw.attr)
4301 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
4302 
4303 	return adev->psp.sup_ifwi_up ? 0440 : 0;
4304 }
4305 
4306 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4307 						const struct bin_attribute *attr,
4308 						int idx)
4309 {
4310 	struct device *dev = kobj_to_dev(kobj);
4311 	struct drm_device *ddev = dev_get_drvdata(dev);
4312 	struct amdgpu_device *adev = drm_to_adev(ddev);
4313 
4314 	return adev->psp.sup_ifwi_up ? 0660 : 0;
4315 }
4316 
4317 const struct attribute_group amdgpu_flash_attr_group = {
4318 	.attrs = flash_attrs,
4319 	.bin_attrs_new = bin_flash_attrs,
4320 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4321 	.is_visible = amdgpu_flash_attr_is_visible,
4322 };
4323 
4324 #if defined(CONFIG_DEBUG_FS)
4325 static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
4326 {
4327 	struct amdgpu_device *adev = filp->f_inode->i_private;
4328 	struct spirom_bo *bo_triplet;
4329 	int ret;
4330 
4331 	/* serialize open() calls on this file */
4332 	if (!mutex_trylock(&adev->psp.mutex))
4333 		return -EBUSY;
4334 
4335 	/*
4336 	 * Make sure only one userspace process is alive for dumping, so that
4337 	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
4338 	 * e.g. when one process tries to open the file while another has
4339 	 * already proceeded to read or release it. This also removes the need
4340 	 * for a mutex in the read() and release() callbacks.
4341 	 */
4342 	if (adev->psp.spirom_dump_trip) {
4343 		mutex_unlock(&adev->psp.mutex);
4344 		return -EBUSY;
4345 	}
4346 
4347 	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
4348 	if (!bo_triplet) {
4349 		mutex_unlock(&adev->psp.mutex);
4350 		return -ENOMEM;
4351 	}
4352 
4353 	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
4354 				      AMDGPU_GPU_PAGE_SIZE,
4355 				      AMDGPU_GEM_DOMAIN_GTT,
4356 				      &bo_triplet->bo,
4357 				      &bo_triplet->mc_addr,
4358 				      &bo_triplet->cpu_addr);
4359 	if (ret)
4360 		goto rel_trip;
4361 
4362 	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
4363 	if (ret)
4364 		goto rel_bo;
4365 
4366 	adev->psp.spirom_dump_trip = bo_triplet;
4367 	mutex_unlock(&adev->psp.mutex);
4368 	return 0;
4369 rel_bo:
4370 	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4371 			      &bo_triplet->cpu_addr);
4372 rel_trip:
4373 	kfree(bo_triplet);
4374 	mutex_unlock(&adev->psp.mutex);
4375 	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
4376 	return ret;
4377 }
4378 
4379 static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
4380 					    loff_t *pos)
4381 {
4382 	struct amdgpu_device *adev = filp->f_inode->i_private;
4383 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4384 
4385 	if (!bo_triplet)
4386 		return -EINVAL;
4387 
4388 	return simple_read_from_buffer(buf,
4389 				       size,
4390 				       pos, bo_triplet->cpu_addr,
4391 				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4392 }
4393 
4394 static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
4395 {
4396 	struct amdgpu_device *adev = filp->f_inode->i_private;
4397 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4398 
4399 	if (bo_triplet) {
4400 		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4401 				      &bo_triplet->cpu_addr);
4402 		kfree(bo_triplet);
4403 	}
4404 
4405 	adev->psp.spirom_dump_trip = NULL;
4406 	return 0;
4407 }
4408 
4409 static const struct file_operations psp_dump_spirom_debugfs_ops = {
4410 	.owner = THIS_MODULE,
4411 	.open = psp_read_spirom_debugfs_open,
4412 	.read = psp_read_spirom_debugfs_read,
4413 	.release = psp_read_spirom_debugfs_release,
4414 	.llseek = default_llseek,
4415 };
4416 #endif
4417 
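/*
 * Expose a "psp_spirom_dump" file under the device's DRM debugfs directory
 * for dumping the SPI ROM contents.
 */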
4418 void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
4419 {
4420 #if defined(CONFIG_DEBUG_FS)
4421 	struct drm_minor *minor = adev_to_drm(adev)->primary;
4422 
4423 	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
4424 				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4425 #endif
4426 }
4427 
4428 const struct amd_ip_funcs psp_ip_funcs = {
4429 	.name = "psp",
4430 	.early_init = psp_early_init,
4431 	.sw_init = psp_sw_init,
4432 	.sw_fini = psp_sw_fini,
4433 	.hw_init = psp_hw_init,
4434 	.hw_fini = psp_hw_fini,
4435 	.suspend = psp_suspend,
4436 	.resume = psp_resume,
4437 	.set_clockgating_state = psp_set_clockgating_state,
4438 	.set_powergating_state = psp_set_powergating_state,
4439 };
4440 
4441 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4442 	.type = AMD_IP_BLOCK_TYPE_PSP,
4443 	.major = 3,
4444 	.minor = 1,
4445 	.rev = 0,
4446 	.funcs = &psp_ip_funcs,
4447 };
4448 
4449 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4450 	.type = AMD_IP_BLOCK_TYPE_PSP,
4451 	.major = 10,
4452 	.minor = 0,
4453 	.rev = 0,
4454 	.funcs = &psp_ip_funcs,
4455 };
4456 
4457 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4458 	.type = AMD_IP_BLOCK_TYPE_PSP,
4459 	.major = 11,
4460 	.minor = 0,
4461 	.rev = 0,
4462 	.funcs = &psp_ip_funcs,
4463 };
4464 
4465 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4466 	.type = AMD_IP_BLOCK_TYPE_PSP,
4467 	.major = 11,
4468 	.minor = 0,
4469 	.rev = 8,
4470 	.funcs = &psp_ip_funcs,
4471 };
4472 
4473 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4474 	.type = AMD_IP_BLOCK_TYPE_PSP,
4475 	.major = 12,
4476 	.minor = 0,
4477 	.rev = 0,
4478 	.funcs = &psp_ip_funcs,
4479 };
4480 
4481 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4482 	.type = AMD_IP_BLOCK_TYPE_PSP,
4483 	.major = 13,
4484 	.minor = 0,
4485 	.rev = 0,
4486 	.funcs = &psp_ip_funcs,
4487 };
4488 
4489 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4490 	.type = AMD_IP_BLOCK_TYPE_PSP,
4491 	.major = 13,
4492 	.minor = 0,
4493 	.rev = 4,
4494 	.funcs = &psp_ip_funcs,
4495 };
4496 
4497 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4498 	.type = AMD_IP_BLOCK_TYPE_PSP,
4499 	.major = 14,
4500 	.minor = 0,
4501 	.rev = 0,
4502 	.funcs = &psp_ip_funcs,
4503 };
4504