xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision c7062be3380cb20c8b1c4a935a13f1848ead0719)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 #include "psp_v15_0_8.h"
43 
44 #include "amdgpu_ras.h"
45 #include "amdgpu_securedisplay.h"
46 #include "amdgpu_atomfirmware.h"
47 
48 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)
49 
50 static int psp_load_smu_fw(struct psp_context *psp);
51 static int psp_rap_terminate(struct psp_context *psp);
52 static int psp_securedisplay_terminate(struct psp_context *psp);
53 
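/*
 * Create the PSP kernel-mode ring: a single 4 KB buffer, placed in VRAM or
 * GTT, that backs GFX command submission to the PSP.
 */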
54 static int psp_ring_init(struct psp_context *psp,
55 			 enum psp_ring_type ring_type)
56 {
57 	int ret = 0;
58 	struct psp_ring *ring;
59 	struct amdgpu_device *adev = psp->adev;
60 
61 	ring = &psp->km_ring;
62 
63 	ring->ring_type = ring_type;
64 
65 	/* allocate a 4k page of local frame buffer memory for the ring */
66 	ring->ring_size = 0x1000;
67 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
68 				      AMDGPU_GEM_DOMAIN_VRAM |
69 				      AMDGPU_GEM_DOMAIN_GTT,
70 				      &adev->firmware.rbuf,
71 				      &ring->ring_mem_mc_addr,
72 				      (void **)&ring->ring_mem);
73 	if (ret) {
74 		ring->ring_size = 0;
75 		return ret;
76 	}
77 
78 	return 0;
79 }
80 
81 /*
82  * Because DF Cstate management is centralized in the PMFW, the firmware
83  * loading sequence is updated as below:
84  *   - Load KDB
85  *   - Load SYS_DRV
86  *   - Load tOS
87  *   - Load PMFW
88  *   - Setup TMR
89  *   - Load other non-psp fw
90  *   - Load ASD
91  *   - Load XGMI/RAS/HDCP/DTM TA if any
92  *
93  * This new sequence is required for
94  *   - Arcturus and onwards
95  */
96 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
97 {
98 	struct amdgpu_device *adev = psp->adev;
99 
100 	if (amdgpu_sriov_vf(adev)) {
101 		psp->pmfw_centralized_cstate_management = false;
102 		return;
103 	}
104 
105 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
106 	case IP_VERSION(11, 0, 0):
107 	case IP_VERSION(11, 0, 4):
108 	case IP_VERSION(11, 0, 5):
109 	case IP_VERSION(11, 0, 7):
110 	case IP_VERSION(11, 0, 9):
111 	case IP_VERSION(11, 0, 11):
112 	case IP_VERSION(11, 0, 12):
113 	case IP_VERSION(11, 0, 13):
114 	case IP_VERSION(13, 0, 0):
115 	case IP_VERSION(13, 0, 2):
116 	case IP_VERSION(13, 0, 7):
117 		psp->pmfw_centralized_cstate_management = true;
118 		break;
119 	default:
120 		psp->pmfw_centralized_cstate_management = false;
121 		break;
122 	}
123 }
124 
125 static int psp_init_sriov_microcode(struct psp_context *psp)
126 {
127 	struct amdgpu_device *adev = psp->adev;
128 	char ucode_prefix[30];
129 	int ret = 0;
130 
131 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
132 
133 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
134 	case IP_VERSION(9, 0, 0):
135 	case IP_VERSION(11, 0, 7):
136 	case IP_VERSION(11, 0, 9):
137 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
138 		ret = psp_init_cap_microcode(psp, ucode_prefix);
139 		break;
140 	case IP_VERSION(13, 0, 2):
141 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
142 		ret = psp_init_cap_microcode(psp, ucode_prefix);
143 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
144 		break;
145 	case IP_VERSION(13, 0, 0):
146 		adev->virt.autoload_ucode_id = 0;
147 		break;
148 	case IP_VERSION(13, 0, 6):
149 	case IP_VERSION(13, 0, 14):
150 		ret = psp_init_cap_microcode(psp, ucode_prefix);
151 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
152 		break;
153 	case IP_VERSION(13, 0, 10):
154 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
155 		ret = psp_init_cap_microcode(psp, ucode_prefix);
156 		break;
157 	case IP_VERSION(13, 0, 12):
158 		ret = psp_init_ta_microcode(psp, ucode_prefix);
159 		break;
160 	default:
161 		return -EINVAL;
162 	}
163 	return ret;
164 }
165 
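/*
 * Select the ASIC-specific PSP callbacks and feature flags (fw autoload,
 * boot-time TMR, IFWI/PD firmware update support) based on the MP0 IP
 * version, then fetch the PSP microcode for the SRIOV or bare-metal path.
 */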
166 static int psp_early_init(struct amdgpu_ip_block *ip_block)
167 {
168 	struct amdgpu_device *adev = ip_block->adev;
169 	struct psp_context *psp = &adev->psp;
170 
171 	psp->autoload_supported = true;
172 	psp->boot_time_tmr = true;
173 
174 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
175 	case IP_VERSION(9, 0, 0):
176 		psp_v3_1_set_psp_funcs(psp);
177 		psp->autoload_supported = false;
178 		psp->boot_time_tmr = false;
179 		break;
180 	case IP_VERSION(10, 0, 0):
181 	case IP_VERSION(10, 0, 1):
182 		psp_v10_0_set_psp_funcs(psp);
183 		psp->autoload_supported = false;
184 		psp->boot_time_tmr = false;
185 		break;
186 	case IP_VERSION(11, 0, 2):
187 	case IP_VERSION(11, 0, 4):
188 		psp_v11_0_set_psp_funcs(psp);
189 		psp->autoload_supported = false;
190 		psp->boot_time_tmr = false;
191 		break;
192 	case IP_VERSION(11, 0, 0):
193 	case IP_VERSION(11, 0, 7):
194 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
195 		fallthrough;
196 	case IP_VERSION(11, 0, 5):
197 	case IP_VERSION(11, 0, 9):
198 	case IP_VERSION(11, 0, 11):
199 	case IP_VERSION(11, 5, 0):
200 	case IP_VERSION(11, 5, 2):
201 	case IP_VERSION(11, 0, 12):
202 	case IP_VERSION(11, 0, 13):
203 		psp_v11_0_set_psp_funcs(psp);
204 		psp->boot_time_tmr = false;
205 		break;
206 	case IP_VERSION(11, 0, 3):
207 	case IP_VERSION(12, 0, 1):
208 		psp_v12_0_set_psp_funcs(psp);
209 		psp->autoload_supported = false;
210 		psp->boot_time_tmr = false;
211 		break;
212 	case IP_VERSION(13, 0, 2):
213 		psp->boot_time_tmr = false;
214 		fallthrough;
215 	case IP_VERSION(13, 0, 6):
216 	case IP_VERSION(13, 0, 14):
217 		psp_v13_0_set_psp_funcs(psp);
218 		psp->autoload_supported = false;
219 		break;
220 	case IP_VERSION(13, 0, 12):
221 		psp_v13_0_set_psp_funcs(psp);
222 		psp->autoload_supported = false;
223 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
224 		break;
225 	case IP_VERSION(13, 0, 1):
226 	case IP_VERSION(13, 0, 3):
227 	case IP_VERSION(13, 0, 5):
228 	case IP_VERSION(13, 0, 8):
229 	case IP_VERSION(13, 0, 11):
230 	case IP_VERSION(14, 0, 0):
231 	case IP_VERSION(14, 0, 1):
232 	case IP_VERSION(14, 0, 4):
233 		psp_v13_0_set_psp_funcs(psp);
234 		psp->boot_time_tmr = false;
235 		break;
236 	case IP_VERSION(11, 0, 8):
237 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
238 			psp_v11_0_8_set_psp_funcs(psp);
239 		}
240 		psp->autoload_supported = false;
241 		psp->boot_time_tmr = false;
242 		break;
243 	case IP_VERSION(13, 0, 0):
244 	case IP_VERSION(13, 0, 7):
245 	case IP_VERSION(13, 0, 10):
246 		psp_v13_0_set_psp_funcs(psp);
247 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
248 		psp->boot_time_tmr = false;
249 		break;
250 	case IP_VERSION(13, 0, 4):
251 		psp_v13_0_4_set_psp_funcs(psp);
252 		psp->boot_time_tmr = false;
253 		break;
254 	case IP_VERSION(14, 0, 2):
255 	case IP_VERSION(14, 0, 3):
256 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
257 		psp_v14_0_set_psp_funcs(psp);
258 		break;
259 	case IP_VERSION(14, 0, 5):
260 		psp_v14_0_set_psp_funcs(psp);
261 		psp->boot_time_tmr = false;
262 		break;
263 	case IP_VERSION(15, 0, 8):
264 		psp_v15_0_8_set_psp_funcs(psp);
265 		break;
266 	default:
267 		return -EINVAL;
268 	}
269 
270 	psp->adev = adev;
271 
272 	adev->psp_timeout = 20000;
273 
274 	psp_check_pmfw_centralized_cstate_management(psp);
275 
276 	if (amdgpu_sriov_vf(adev))
277 		return psp_init_sriov_microcode(psp);
278 	else
279 		return psp_init_microcode(psp);
280 }
281 
282 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
283 {
284 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
285 			      &mem_ctx->shared_buf);
286 	mem_ctx->shared_bo = NULL;
287 }
288 
289 static void psp_free_shared_bufs(struct psp_context *psp)
290 {
291 	void *tmr_buf;
292 	void **pptr;
293 
294 	/* free TMR memory buffer */
295 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
296 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
297 	psp->tmr_bo = NULL;
298 
299 	/* free xgmi shared memory */
300 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
301 
302 	/* free ras shared memory */
303 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
304 
305 	/* free hdcp shared memory */
306 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
307 
308 	/* free dtm shared memory */
309 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
310 
311 	/* free rap shared memory */
312 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
313 
314 	/* free securedisplay shared memory */
315 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
316 
317 
318 }
319 
320 static void psp_memory_training_fini(struct psp_context *psp)
321 {
322 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
323 
324 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
325 	kfree(ctx->sys_cache);
326 	ctx->sys_cache = NULL;
327 }
328 
329 static int psp_memory_training_init(struct psp_context *psp)
330 {
331 	int ret;
332 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
333 
334 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
335 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
336 		return 0;
337 	}
338 
339 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
340 	if (ctx->sys_cache == NULL) {
341 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
342 		ret = -ENOMEM;
343 		goto Err_out;
344 	}
345 
346 	dev_dbg(psp->adev->dev,
347 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
348 		ctx->train_data_size,
349 		ctx->p2c_train_data_offset,
350 		ctx->c2p_train_data_offset);
351 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
352 	return 0;
353 
354 Err_out:
355 	psp_memory_training_fini(psp);
356 	return ret;
357 }
358 
359 /*
360  * Helper function to query a psp runtime database entry
361  *
362  * @adev: amdgpu_device pointer
363  * @entry_type: the type of psp runtime database entry
364  * @db_entry: runtime database entry pointer
365  *
366  * Return false if the runtime database doesn't exist or the entry is invalid,
367  * or true if the specific database entry is found and copied to @db_entry
368  */
369 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
370 				     enum psp_runtime_entry_type entry_type,
371 				     void *db_entry)
372 {
373 	uint64_t db_header_pos, db_dir_pos;
374 	struct psp_runtime_data_header db_header = {0};
375 	struct psp_runtime_data_directory db_dir = {0};
376 	bool ret = false;
377 	int i;
378 
379 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
380 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
381 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
382 		return false;
383 
384 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
385 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
386 
387 	/* read runtime db header from vram */
388 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
389 			sizeof(struct psp_runtime_data_header), false);
390 
391 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
392 		/* runtime db doesn't exist, exit */
393 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
394 		return false;
395 	}
396 
397 	/* read runtime database entry from vram */
398 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
399 			sizeof(struct psp_runtime_data_directory), false);
400 
401 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
402 		/* invalid db entry count, exit */
403 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
404 		return false;
405 	}
406 
407 	/* look up for requested entry type */
408 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
409 		if (db_dir.entry_list[i].entry_type == entry_type) {
410 			switch (entry_type) {
411 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
412 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
413 					/* invalid db entry size */
414 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
415 					return false;
416 				}
417 				/* read runtime database entry */
418 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
419 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
420 				ret = true;
421 				break;
422 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
423 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
424 					/* invalid db entry size */
425 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
426 					return false;
427 				}
428 				/* read runtime database entry */
429 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
430 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
431 				ret = true;
432 				break;
433 			default:
434 				ret = false;
435 				break;
436 			}
437 		}
438 	}
439 
440 	return ret;
441 }
442 
443 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
444 {
445 	struct amdgpu_device *adev = ip_block->adev;
446 	struct psp_context *psp = &adev->psp;
447 	int ret;
448 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
449 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
450 	struct psp_runtime_scpm_entry scpm_entry;
451 
452 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
453 	if (!psp->cmd) {
454 		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
455 		return -ENOMEM;
456 	}
457 
458 	adev->psp.xgmi_context.supports_extended_data =
459 		!adev->gmc.xgmi.connected_to_cpu &&
460 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
461 
462 	memset(&scpm_entry, 0, sizeof(scpm_entry));
463 	if ((psp_get_runtime_db_entry(adev,
464 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
465 				&scpm_entry)) &&
466 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
467 		adev->scpm_enabled = true;
468 		adev->scpm_status = scpm_entry.scpm_status;
469 	} else {
470 		adev->scpm_enabled = false;
471 		adev->scpm_status = SCPM_DISABLE;
472 	}
473 
474 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
475 
476 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
477 	if (psp_get_runtime_db_entry(adev,
478 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
479 				&boot_cfg_entry)) {
480 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
481 		if ((psp->boot_cfg_bitmask) &
482 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
483 			/* If psp runtime database exists, then
484 			 * only enable two stage memory training
485 			 * when TWO_STAGE_DRAM_TRAINING bit is set
486 			 * in runtime database
487 			 */
488 			mem_training_ctx->enable_mem_training = true;
489 		}
490 
491 	} else {
492 		/* If psp runtime database doesn't exist or is
493 		 * invalid, force enable two stage memory training
494 		 */
495 		mem_training_ctx->enable_mem_training = true;
496 	}
497 
498 	if (mem_training_ctx->enable_mem_training) {
499 		ret = psp_memory_training_init(psp);
500 		if (ret) {
501 			dev_err(adev->dev, "Failed to initialize memory training!\n");
502 			return ret;
503 		}
504 
505 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
506 		if (ret) {
507 			dev_err(adev->dev, "Failed to process memory training!\n");
508 			return ret;
509 		}
510 	}
511 
512 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
513 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
514 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
515 				      &psp->fw_pri_bo,
516 				      &psp->fw_pri_mc_addr,
517 				      &psp->fw_pri_buf);
518 	if (ret)
519 		return ret;
520 
521 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
522 				      AMDGPU_GEM_DOMAIN_VRAM |
523 				      AMDGPU_GEM_DOMAIN_GTT,
524 				      &psp->fence_buf_bo,
525 				      &psp->fence_buf_mc_addr,
526 				      &psp->fence_buf);
527 	if (ret)
528 		goto failed1;
529 
530 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
531 				      AMDGPU_GEM_DOMAIN_VRAM |
532 				      AMDGPU_GEM_DOMAIN_GTT,
533 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
534 				      (void **)&psp->cmd_buf_mem);
535 	if (ret)
536 		goto failed2;
537 
538 	return 0;
539 
540 failed2:
541 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
542 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
543 failed1:
544 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
545 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
546 	return ret;
547 }
548 
549 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
550 {
551 	struct amdgpu_device *adev = ip_block->adev;
552 	struct psp_context *psp = &adev->psp;
553 
554 	psp_memory_training_fini(psp);
555 
556 	amdgpu_ucode_release(&psp->sos_fw);
557 	amdgpu_ucode_release(&psp->asd_fw);
558 	amdgpu_ucode_release(&psp->ta_fw);
559 	amdgpu_ucode_release(&psp->cap_fw);
560 	amdgpu_ucode_release(&psp->toc_fw);
561 
562 	kfree(psp->cmd);
563 	psp->cmd = NULL;
564 
565 	psp_free_shared_bufs(psp);
566 
567 	if (psp->km_ring.ring_mem)
568 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
569 				      &psp->km_ring.ring_mem_mc_addr,
570 				      (void **)&psp->km_ring.ring_mem);
571 
572 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
573 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
574 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
575 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
576 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
577 			      (void **)&psp->cmd_buf_mem);
578 
579 	return 0;
580 }
581 
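/*
 * Poll @reg_index until (value & @mask) == @reg_val, or, when
 * PSP_WAITREG_CHANGED is set, until the value differs from @reg_val.
 * Returns 0 on success or -ETIME after adev->usec_timeout iterations;
 * PSP_WAITREG_NOVERBOSE suppresses the timeout message.
 */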
582 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
583 		 uint32_t mask, uint32_t flags)
584 {
585 	bool check_changed = flags & PSP_WAITREG_CHANGED;
586 	bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
587 	uint32_t val;
588 	int i;
589 	struct amdgpu_device *adev = psp->adev;
590 
591 	if (psp->adev->no_hw_access)
592 		return 0;
593 
594 	for (i = 0; i < adev->usec_timeout; i++) {
595 		val = RREG32(reg_index);
596 		if (check_changed) {
597 			if (val != reg_val)
598 				return 0;
599 		} else {
600 			if ((val & mask) == reg_val)
601 				return 0;
602 		}
603 		udelay(1);
604 	}
605 
606 	if (verbose)
607 		dev_err(adev->dev,
608 			"psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
609 			reg_index, mask, val, reg_val);
610 
611 	return -ETIME;
612 }
613 
614 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
615 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
616 {
617 	uint32_t val;
618 	int i;
619 	struct amdgpu_device *adev = psp->adev;
620 
621 	if (psp->adev->no_hw_access)
622 		return 0;
623 
624 	for (i = 0; i < msec_timeout; i++) {
625 		val = RREG32(reg_index);
626 		if ((val & mask) == reg_val)
627 			return 0;
628 		msleep(1);
629 	}
630 
631 	return -ETIME;
632 }
633 
634 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
635 {
636 	switch (cmd_id) {
637 	case GFX_CMD_ID_LOAD_TA:
638 		return "LOAD_TA";
639 	case GFX_CMD_ID_UNLOAD_TA:
640 		return "UNLOAD_TA";
641 	case GFX_CMD_ID_INVOKE_CMD:
642 		return "INVOKE_CMD";
643 	case GFX_CMD_ID_LOAD_ASD:
644 		return "LOAD_ASD";
645 	case GFX_CMD_ID_SETUP_TMR:
646 		return "SETUP_TMR";
647 	case GFX_CMD_ID_LOAD_IP_FW:
648 		return "LOAD_IP_FW";
649 	case GFX_CMD_ID_DESTROY_TMR:
650 		return "DESTROY_TMR";
651 	case GFX_CMD_ID_SAVE_RESTORE:
652 		return "SAVE_RESTORE_IP_FW";
653 	case GFX_CMD_ID_SETUP_VMR:
654 		return "SETUP_VMR";
655 	case GFX_CMD_ID_DESTROY_VMR:
656 		return "DESTROY_VMR";
657 	case GFX_CMD_ID_PROG_REG:
658 		return "PROG_REG";
659 	case GFX_CMD_ID_GET_FW_ATTESTATION:
660 		return "GET_FW_ATTESTATION";
661 	case GFX_CMD_ID_LOAD_TOC:
662 		return "ID_LOAD_TOC";
663 	case GFX_CMD_ID_AUTOLOAD_RLC:
664 		return "AUTOLOAD_RLC";
665 	case GFX_CMD_ID_BOOT_CFG:
666 		return "BOOT_CFG";
667 	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
668 		return "CONFIG_SQ_PERFMON";
669 	case GFX_CMD_ID_FB_FW_RESERV_ADDR:
670 		return "FB_FW_RESERV_ADDR";
671 	case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
672 		return "FB_FW_RESERV_EXT_ADDR";
673 	case GFX_CMD_ID_SRIOV_SPATIAL_PART:
674 		return "SPATIAL_PARTITION";
675 	case GFX_CMD_ID_FB_NPS_MODE:
676 		return "NPS_MODE_CHANGE";
677 	default:
678 		return "UNKNOWN CMD";
679 	}
680 }
681 
682 static bool psp_err_warn(struct psp_context *psp)
683 {
684 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
685 
686 	/* This response indicates reg list is already loaded */
687 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
688 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
689 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
690 	    cmd->resp.status == TEE_ERROR_CANCEL)
691 		return false;
692 
693 	return true;
694 }
695 
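/*
 * Copy @cmd into the shared command buffer, submit it on the KM ring, poll
 * the fence buffer until the PSP writes back the expected fence value (or a
 * RAS interrupt/timeout ends the wait), then copy the response back to @cmd.
 */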
696 static int
697 psp_cmd_submit_buf(struct psp_context *psp,
698 		   struct amdgpu_firmware_info *ucode,
699 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
700 {
701 	int ret;
702 	int index;
703 	int timeout = psp->adev->psp_timeout;
704 	bool ras_intr = false;
705 	bool skip_unsupport = false;
706 
707 	if (psp->adev->no_hw_access)
708 		return 0;
709 
710 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
711 
712 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
713 
714 	index = atomic_inc_return(&psp->fence_value);
715 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
716 	if (ret) {
717 		atomic_dec(&psp->fence_value);
718 		goto exit;
719 	}
720 
721 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
722 	while (*((unsigned int *)psp->fence_buf) != index) {
723 		if (--timeout == 0)
724 			break;
725 		/*
726 		 * Shouldn't wait for timeout when err_event_athub occurs,
727 		 * because gpu reset thread triggered and lock resource should
728 		 * be released for psp resume sequence.
729 		 */
730 		ras_intr = amdgpu_ras_intr_triggered();
731 		if (ras_intr)
732 			break;
733 		usleep_range(10, 100);
734 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
735 	}
736 
737 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
738 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
739 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
740 
741 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
742 
743 	/* In some cases, the psp response status is not 0 even though there is no
744 	 * problem with the submitted command. Some versions of the PSP FW
745 	 * don't write 0 to that field.
746 	 * So here we only print a warning instead of an error during psp
747 	 * initialization, to avoid breaking hw_init, and we don't return
748 	 * -EINVAL.
749 	 */
750 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
751 		if (ucode)
752 			dev_warn(psp->adev->dev,
753 				 "failed to load ucode %s(0x%X) ",
754 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
755 		if (psp_err_warn(psp))
756 			dev_warn(
757 				psp->adev->dev,
758 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
759 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
760 				psp->cmd_buf_mem->cmd_id,
761 				psp->cmd_buf_mem->resp.status);
762 		/* If any firmware (including CAP) load fails under SRIOV, it should
763 		 * return failure to stop the VF from initializing.
764 		 * Also return failure in case of timeout
765 		 */
766 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
767 			ret = -EINVAL;
768 			goto exit;
769 		}
770 	}
771 
772 	if (ucode) {
773 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
774 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
775 	}
776 
777 exit:
778 	return ret;
779 }
780 
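/*
 * acquire_psp_cmd_buf() takes psp->mutex and returns the zeroed psp->cmd
 * scratch structure; release_psp_cmd_buf() drops the mutex. Every GFX
 * command submission is wrapped by this pair.
 */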
781 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
782 {
783 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
784 
785 	mutex_lock(&psp->mutex);
786 
787 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
788 
789 	return cmd;
790 }
791 
792 static void release_psp_cmd_buf(struct psp_context *psp)
793 {
794 	mutex_unlock(&psp->mutex);
795 }
796 
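/* Build a SETUP_TMR (SETUP_VMR under SRIOV) command from the GPU address,
 * size and system physical address of @tmr_bo.
 */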
797 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
798 				 struct psp_gfx_cmd_resp *cmd,
799 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
800 {
801 	struct amdgpu_device *adev = psp->adev;
802 	uint32_t size = 0;
803 	uint64_t tmr_pa = 0;
804 
805 	if (tmr_bo) {
806 		size = amdgpu_bo_size(tmr_bo);
807 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
808 	}
809 
810 	if (amdgpu_sriov_vf(psp->adev))
811 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
812 	else
813 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
814 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
815 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
816 	cmd->cmd.cmd_setup_tmr.buf_size = size;
817 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
818 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
819 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
820 }
821 
822 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
823 				      uint64_t pri_buf_mc, uint32_t size)
824 {
825 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
826 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
827 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
828 	cmd->cmd.cmd_load_toc.toc_size = size;
829 }
830 
831 /* Issue the LOAD TOC cmd to the PSP to parse the toc and calculate the tmr size needed */
832 static int psp_load_toc(struct psp_context *psp,
833 			uint32_t *tmr_size)
834 {
835 	int ret;
836 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
837 
838 	/* Copy toc to psp firmware private buffer */
839 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
840 
841 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
842 
843 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
844 				 psp->fence_buf_mc_addr);
845 	if (!ret)
846 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
847 
848 	release_psp_cmd_buf(psp);
849 
850 	return ret;
851 }
852 
853 /* Set up Trusted Memory Region */
854 static int psp_tmr_init(struct psp_context *psp)
855 {
856 	int ret = 0;
857 	int tmr_size;
858 	void *tmr_buf;
859 	void **pptr;
860 
861 	/*
862 	 * According to the HW engineers, the TMR address should be "naturally
863 	 * aligned", i.e. the start address should be a multiple of the TMR size.
864 	 *
865 	 * Note: this memory needs to stay reserved until the driver is
866 	 * unloaded.
867 	 */
868 	tmr_size = PSP_TMR_SIZE(psp->adev);
869 
870 	/* For ASICs that support RLC autoload, the psp will parse the toc
871 	 * and calculate the total TMR size needed
872 	 */
873 	if (!amdgpu_sriov_vf(psp->adev) &&
874 	    psp->toc.start_addr &&
875 	    psp->toc.size_bytes &&
876 	    psp->fw_pri_buf) {
877 		ret = psp_load_toc(psp, &tmr_size);
878 		if (ret) {
879 			dev_err(psp->adev->dev, "Failed to load toc\n");
880 			return ret;
881 		}
882 	}
883 
884 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
885 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
886 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
887 					      PSP_TMR_ALIGNMENT,
888 					      AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
889 					      &psp->tmr_bo, &psp->tmr_mc_addr,
890 					      pptr);
891 	}
892 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
893 		psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
894 
895 	return ret;
896 }
897 
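/* ASICs for which the SRIOV guest must skip TMR setup/teardown, since the
 * TMR is managed by the host driver.
 */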
898 static bool psp_skip_tmr(struct psp_context *psp)
899 {
900 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
901 	case IP_VERSION(11, 0, 9):
902 	case IP_VERSION(11, 0, 7):
903 	case IP_VERSION(13, 0, 2):
904 	case IP_VERSION(13, 0, 6):
905 	case IP_VERSION(13, 0, 10):
906 	case IP_VERSION(13, 0, 12):
907 	case IP_VERSION(13, 0, 14):
908 	case IP_VERSION(15, 0, 8):
909 		return true;
910 	default:
911 		return false;
912 	}
913 }
914 
915 static int psp_tmr_load(struct psp_context *psp)
916 {
917 	int ret;
918 	struct psp_gfx_cmd_resp *cmd;
919 
920 	/* Under SRIOV, do not set up the TMR on ASICs listed in psp_skip_tmr()
921 	 * (e.g. Navi12, Sienna Cichlid); it is already set up by the host driver.
922 	 */
923 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
924 		return 0;
925 
926 	cmd = acquire_psp_cmd_buf(psp);
927 
928 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
929 	if (psp->tmr_bo)
930 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
931 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
932 
933 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
934 				 psp->fence_buf_mc_addr);
935 
936 	release_psp_cmd_buf(psp);
937 
938 	return ret;
939 }
940 
941 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
942 					struct psp_gfx_cmd_resp *cmd)
943 {
944 	if (amdgpu_sriov_vf(psp->adev))
945 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
946 	else
947 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
948 }
949 
950 static int psp_tmr_unload(struct psp_context *psp)
951 {
952 	int ret;
953 	struct psp_gfx_cmd_resp *cmd;
954 
955 	/* Under SRIOV, skip TMR unload on ASICs listed in psp_skip_tmr()
956 	 * (e.g. Navi12, Sienna Cichlid), as the TMR was never loaded by the guest
957 	 */
958 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
959 		return 0;
960 
961 	cmd = acquire_psp_cmd_buf(psp);
962 
963 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
964 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
965 
966 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
967 				 psp->fence_buf_mc_addr);
968 
969 	release_psp_cmd_buf(psp);
970 
971 	return ret;
972 }
973 
974 static int psp_tmr_terminate(struct psp_context *psp)
975 {
976 	return psp_tmr_unload(psp);
977 }
978 
979 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
980 					uint64_t *output_ptr)
981 {
982 	int ret;
983 	struct psp_gfx_cmd_resp *cmd;
984 
985 	if (!output_ptr)
986 		return -EINVAL;
987 
988 	if (amdgpu_sriov_vf(psp->adev))
989 		return 0;
990 
991 	cmd = acquire_psp_cmd_buf(psp);
992 
993 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
994 
995 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
996 				 psp->fence_buf_mc_addr);
997 
998 	if (!ret) {
999 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
1000 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
1001 	}
1002 
1003 	release_psp_cmd_buf(psp);
1004 
1005 	return ret;
1006 }
1007 
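/* Query the PSP for a firmware-reserved framebuffer region; reports an
 * address and size of 0 without failing if the command is not implemented.
 */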
1008 static int psp_get_fw_reservation_info(struct psp_context *psp,
1009 						   uint32_t cmd_id,
1010 						   uint64_t *addr,
1011 						   uint32_t *size)
1012 {
1013 	int ret;
1014 	uint32_t status;
1015 	struct psp_gfx_cmd_resp *cmd;
1016 
1017 	cmd = acquire_psp_cmd_buf(psp);
1018 
1019 	cmd->cmd_id = cmd_id;
1020 
1021 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1022 				 psp->fence_buf_mc_addr);
1023 	if (ret) {
1024 		release_psp_cmd_buf(psp);
1025 		return ret;
1026 	}
1027 
1028 	status = cmd->resp.status;
1029 	if (status == PSP_ERR_UNKNOWN_COMMAND) {
1030 		release_psp_cmd_buf(psp);
1031 		*addr = 0;
1032 		*size = 0;
1033 		return 0;
1034 	}
1035 
1036 	*addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
1037 		cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
1038 	*size = cmd->resp.uresp.fw_reserve_info.reserve_size;
1039 
1040 	release_psp_cmd_buf(psp);
1041 
1042 	return 0;
1043 }
1044 
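/*
 * On SOS versions that support it, query the firmware-reserved regions from
 * the PSP and re-reserve them with the memory manager so the driver does not
 * reuse them.
 */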
1045 int psp_update_fw_reservation(struct psp_context *psp)
1046 {
1047 	int ret;
1048 	uint64_t reserv_addr, reserv_addr_ext;
1049 	uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
1050 	struct amdgpu_device *adev = psp->adev;
1051 
1052 	mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
1053 
1054 	if (amdgpu_sriov_vf(psp->adev))
1055 		return 0;
1056 
1057 	switch (mp0_ip_ver) {
1058 	case IP_VERSION(14, 0, 2):
1059 		if (adev->psp.sos.fw_version < 0x3b0e0d)
1060 			return 0;
1061 		break;
1062 
1063 	case IP_VERSION(14, 0, 3):
1064 		if (adev->psp.sos.fw_version < 0x3a0e14)
1065 			return 0;
1066 		break;
1067 
1068 	default:
1069 		return 0;
1070 	}
1071 
1072 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
1073 	if (ret)
1074 		return ret;
1075 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
1076 	if (ret)
1077 		return ret;
1078 
1079 	if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
1080 		dev_warn(adev->dev, "reserve fw region is not valid!\n");
1081 		return 0;
1082 	}
1083 
1084 	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1085 
1086 	reserv_size = roundup(reserv_size, SZ_1M);
1087 
1088 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
1089 	if (ret) {
1090 		dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
1091 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1092 		return ret;
1093 	}
1094 
1095 	reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
1096 
1097 	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
1098 					 &adev->mman.fw_reserved_memory_extend, NULL);
1099 	if (ret) {
1100 		dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
1101 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
1102 		return ret;
1103 	}
1104 
1105 	return 0;
1106 }
1107 
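/* Read the PSP boot config and report via @boot_cfg whether GECC is enabled. */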
1108 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
1109 {
1110 	struct psp_context *psp = &adev->psp;
1111 	struct psp_gfx_cmd_resp *cmd;
1112 	int ret;
1113 
1114 	if (amdgpu_sriov_vf(adev))
1115 		return 0;
1116 
1117 	cmd = acquire_psp_cmd_buf(psp);
1118 
1119 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1120 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
1121 
1122 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1123 	if (!ret) {
1124 		*boot_cfg =
1125 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1126 	}
1127 
1128 	release_psp_cmd_buf(psp);
1129 
1130 	return ret;
1131 }
1132 
1133 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1134 {
1135 	int ret;
1136 	struct psp_context *psp = &adev->psp;
1137 	struct psp_gfx_cmd_resp *cmd;
1138 
1139 	if (amdgpu_sriov_vf(adev))
1140 		return 0;
1141 
1142 	cmd = acquire_psp_cmd_buf(psp);
1143 
1144 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1145 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1146 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1147 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1148 
1149 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1150 
1151 	release_psp_cmd_buf(psp);
1152 
1153 	return ret;
1154 }
1155 
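/* Load the register list blob, if present, through the generic LOAD_IP_FW
 * command using the PSP private firmware buffer.
 */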
1156 static int psp_rl_load(struct amdgpu_device *adev)
1157 {
1158 	int ret;
1159 	struct psp_context *psp = &adev->psp;
1160 	struct psp_gfx_cmd_resp *cmd;
1161 
1162 	if (!is_psp_fw_valid(psp->rl))
1163 		return 0;
1164 
1165 	cmd = acquire_psp_cmd_buf(psp);
1166 
1167 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1168 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1169 
1170 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1171 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1172 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1173 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1174 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1175 
1176 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1177 
1178 	release_psp_cmd_buf(psp);
1179 
1180 	return ret;
1181 }
1182 
1183 int psp_memory_partition(struct psp_context *psp, int mode)
1184 {
1185 	struct psp_gfx_cmd_resp *cmd;
1186 	int ret;
1187 
1188 	if (amdgpu_sriov_vf(psp->adev))
1189 		return 0;
1190 
1191 	cmd = acquire_psp_cmd_buf(psp);
1192 
1193 	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1194 	cmd->cmd.cmd_memory_part.mode = mode;
1195 
1196 	dev_info(psp->adev->dev,
1197 		 "Requesting %d memory partition change through PSP", mode);
1198 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1199 	if (ret)
1200 		dev_err(psp->adev->dev,
1201 			"PSP request failed to change to NPS%d mode\n", mode);
1202 
1203 	release_psp_cmd_buf(psp);
1204 
1205 	return ret;
1206 }
1207 
1208 int psp_spatial_partition(struct psp_context *psp, int mode)
1209 {
1210 	struct psp_gfx_cmd_resp *cmd;
1211 	int ret;
1212 
1213 	if (amdgpu_sriov_vf(psp->adev))
1214 		return 0;
1215 
1216 	cmd = acquire_psp_cmd_buf(psp);
1217 
1218 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1219 	cmd->cmd.cmd_spatial_part.mode = mode;
1220 
1221 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1222 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1223 
1224 	release_psp_cmd_buf(psp);
1225 
1226 	return ret;
1227 }
1228 
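/* Load the ASD firmware through the TA load path; skipped under SRIOV and on
 * ASICs without display hardware (MP0 >= 13.0.10).
 */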
1229 static int psp_asd_initialize(struct psp_context *psp)
1230 {
1231 	int ret;
1232 
1233 	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
1234 	 * Add a workaround to bypass it for SRIOV for now.
1235 	 * TODO: add a version check to make this common
1236 	 */
1237 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1238 		return 0;
1239 
1240 	/* bypass asd if display hardware is not available */
1241 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1242 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1243 		return 0;
1244 
1245 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1246 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1247 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1248 
1249 	ret = psp_ta_load(psp, &psp->asd_context);
1250 	if (!ret)
1251 		psp->asd_context.initialized = true;
1252 
1253 	return ret;
1254 }
1255 
1256 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1257 				       uint32_t session_id)
1258 {
1259 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1260 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1261 }
1262 
1263 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1264 {
1265 	int ret;
1266 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1267 
1268 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1269 
1270 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1271 
1272 	context->resp_status = cmd->resp.status;
1273 
1274 	release_psp_cmd_buf(psp);
1275 
1276 	return ret;
1277 }
1278 
1279 static int psp_asd_terminate(struct psp_context *psp)
1280 {
1281 	int ret;
1282 
1283 	if (amdgpu_sriov_vf(psp->adev))
1284 		return 0;
1285 
1286 	if (!psp->asd_context.initialized)
1287 		return 0;
1288 
1289 	ret = psp_ta_unload(psp, &psp->asd_context);
1290 	if (!ret)
1291 		psp->asd_context.initialized = false;
1292 
1293 	return ret;
1294 }
1295 
1296 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1297 		uint32_t id, uint32_t value)
1298 {
1299 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1300 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1301 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1302 }
1303 
1304 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1305 		uint32_t value)
1306 {
1307 	struct psp_gfx_cmd_resp *cmd;
1308 	int ret = 0;
1309 
1310 	if (reg >= PSP_REG_LAST)
1311 		return -EINVAL;
1312 
1313 	cmd = acquire_psp_cmd_buf(psp);
1314 
1315 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1316 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1317 	if (ret)
1318 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1319 
1320 	release_psp_cmd_buf(psp);
1321 
1322 	return ret;
1323 }
1324 
1325 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1326 				     uint64_t ta_bin_mc,
1327 				     struct ta_context *context)
1328 {
1329 	cmd->cmd_id				= context->ta_load_type;
1330 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1331 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1332 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1333 
1334 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1335 		lower_32_bits(context->mem_context.shared_mc_addr);
1336 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1337 		upper_32_bits(context->mem_context.shared_mc_addr);
1338 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1339 }
1340 
1341 int psp_ta_init_shared_buf(struct psp_context *psp,
1342 				  struct ta_mem_context *mem_ctx)
1343 {
1344 	/*
1345 	 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
1346 	 * physical) for the TA <-> host shared memory
1347 	 */
1348 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1349 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1350 				      AMDGPU_GEM_DOMAIN_GTT,
1351 				      &mem_ctx->shared_bo,
1352 				      &mem_ctx->shared_mc_addr,
1353 				      &mem_ctx->shared_buf);
1354 }
1355 
1356 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1357 				       uint32_t ta_cmd_id,
1358 				       uint32_t session_id)
1359 {
1360 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1361 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1362 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1363 }
1364 
1365 int psp_ta_invoke(struct psp_context *psp,
1366 		  uint32_t ta_cmd_id,
1367 		  struct ta_context *context)
1368 {
1369 	int ret;
1370 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1371 
1372 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1373 
1374 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1375 				 psp->fence_buf_mc_addr);
1376 
1377 	context->resp_status = cmd->resp.status;
1378 
1379 	release_psp_cmd_buf(psp);
1380 
1381 	return ret;
1382 }
1383 
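/* Copy the TA binary into the PSP private firmware buffer, issue the load
 * command and record the session id returned by the PSP on success.
 */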
1384 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1385 {
1386 	int ret;
1387 	struct psp_gfx_cmd_resp *cmd;
1388 
1389 	cmd = acquire_psp_cmd_buf(psp);
1390 
1391 	psp_copy_fw(psp, context->bin_desc.start_addr,
1392 		    context->bin_desc.size_bytes);
1393 
1394 	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
1395 		context->mem_context.shared_bo)
1396 		context->mem_context.shared_mc_addr =
1397 			amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
1398 
1399 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1400 
1401 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1402 				 psp->fence_buf_mc_addr);
1403 
1404 	context->resp_status = cmd->resp.status;
1405 
1406 	if (!ret)
1407 		context->session_id = cmd->resp.session_id;
1408 
1409 	release_psp_cmd_buf(psp);
1410 
1411 	return ret;
1412 }
1413 
1414 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1415 {
1416 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1417 }
1418 
1419 int psp_xgmi_terminate(struct psp_context *psp)
1420 {
1421 	int ret;
1422 	struct amdgpu_device *adev = psp->adev;
1423 
1424 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1425 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1426 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1427 	     adev->gmc.xgmi.connected_to_cpu))
1428 		return 0;
1429 
1430 	if (!psp->xgmi_context.context.initialized)
1431 		return 0;
1432 
1433 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1434 
1435 	psp->xgmi_context.context.initialized = false;
1436 
1437 	return ret;
1438 }
1439 
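/* Optionally load the XGMI TA (allocating its shared buffer on first use),
 * then invoke TA_COMMAND_XGMI__INITIALIZE and cache the TA capability flags.
 */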
1440 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1441 {
1442 	struct ta_xgmi_shared_memory *xgmi_cmd;
1443 	int ret;
1444 
1445 	if (!psp->ta_fw ||
1446 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1447 	    !psp->xgmi_context.context.bin_desc.start_addr)
1448 		return -ENOENT;
1449 
1450 	if (!load_ta)
1451 		goto invoke;
1452 
1453 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1454 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1455 
1456 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1457 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1458 		if (ret)
1459 			return ret;
1460 	}
1461 
1462 	/* Load XGMI TA */
1463 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1464 	if (!ret)
1465 		psp->xgmi_context.context.initialized = true;
1466 	else
1467 		return ret;
1468 
1469 invoke:
1470 	/* Initialize XGMI session */
1471 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1472 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1473 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1474 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1475 
1476 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1477 	/* note down the capability flag for the XGMI TA */
1478 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1479 
1480 	return ret;
1481 }
1482 
1483 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1484 {
1485 	struct ta_xgmi_shared_memory *xgmi_cmd;
1486 	int ret;
1487 
1488 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1489 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1490 
1491 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1492 
1493 	/* Invoke xgmi ta to get hive id */
1494 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1495 	if (ret)
1496 		return ret;
1497 
1498 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1499 
1500 	return 0;
1501 }
1502 
1503 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1504 {
1505 	struct ta_xgmi_shared_memory *xgmi_cmd;
1506 	int ret;
1507 
1508 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1509 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1510 
1511 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1512 
1513 	/* Invoke xgmi ta to get the node id */
1514 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1515 	if (ret)
1516 		return ret;
1517 
1518 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1519 
1520 	return 0;
1521 }
1522 
1523 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1524 {
1525 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1526 			IP_VERSION(13, 0, 2) &&
1527 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1528 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1529 		       IP_VERSION(13, 0, 6);
1530 }
1531 
1532 /*
1533  * Chips that support extended topology information require the driver to
1534  * reflect topology information in the opposite direction.  This is
1535  * because the TA has already exceeded its link record limit and if the
1536  * TA holds bi-directional information, the driver would have to do
1537  * multiple fetches instead of just two.
1538  */
1539 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1540 					struct psp_xgmi_node_info node_info)
1541 {
1542 	struct amdgpu_device *mirror_adev;
1543 	struct amdgpu_hive_info *hive;
1544 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1545 	uint64_t dst_node_id = node_info.node_id;
1546 	uint8_t dst_num_hops = node_info.num_hops;
1547 	uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled;
1548 	uint8_t dst_num_links = node_info.num_links;
1549 
1550 	hive = amdgpu_get_xgmi_hive(psp->adev);
1551 	if (WARN_ON(!hive))
1552 		return;
1553 
1554 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1555 		struct psp_xgmi_topology_info *mirror_top_info;
1556 		int j;
1557 
1558 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1559 			continue;
1560 
1561 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1562 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1563 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1564 				continue;
1565 
1566 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1567 			mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled;
1568 			/* prevent re-reflecting a 0 num_links value, since the reflection
1569 			 * criteria is based on num_hops (direct or indirect).
1570 			 */
1571 			if (dst_num_links) {
1572 				mirror_top_info->nodes[j].num_links = dst_num_links;
1573 				/* swap src and dst due to frame of reference */
1574 				for (int k = 0; k < dst_num_links; k++) {
1575 					mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num =
1576 						node_info.port_num[k].dst_xgmi_port_num;
1577 					mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num =
1578 						node_info.port_num[k].src_xgmi_port_num;
1579 				}
1580 			}
1581 
1582 			break;
1583 		}
1584 
1585 		break;
1586 	}
1587 
1588 	amdgpu_put_xgmi_hive(hive);
1589 }
1590 
1591 int psp_xgmi_get_topology_info(struct psp_context *psp,
1592 			       int number_devices,
1593 			       struct psp_xgmi_topology_info *topology,
1594 			       bool get_extended_data)
1595 {
1596 	struct ta_xgmi_shared_memory *xgmi_cmd;
1597 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1598 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1599 	int i;
1600 	int ret;
1601 
1602 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1603 		return -EINVAL;
1604 
1605 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1606 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1607 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1608 
1609 	/* Fill in the shared memory with topology information as input */
1610 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1611 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1612 	topology_info_input->num_nodes = number_devices;
1613 
1614 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1615 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1616 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1617 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1618 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1619 	}
1620 
1621 	/* Invoke xgmi ta to get the topology information */
1622 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1623 	if (ret)
1624 		return ret;
1625 
1626 	/* Read the output topology information from the shared memory */
1627 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1628 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1629 	for (i = 0; i < topology->num_nodes; i++) {
1630 		/* extended data will either be 0 or equal to non-extended data */
1631 		if (topology_info_output->nodes[i].num_hops)
1632 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1633 
1634 		/* non-extended data gets everything here so no need to update */
1635 		if (!get_extended_data) {
1636 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1637 			topology->nodes[i].is_sharing_enabled =
1638 					topology_info_output->nodes[i].is_sharing_enabled;
1639 			topology->nodes[i].sdma_engine =
1640 					topology_info_output->nodes[i].sdma_engine;
1641 		}
1642 
1643 	}
1644 
1645 	/* Invoke xgmi ta again to get the link information */
1646 	if (psp_xgmi_peer_link_info_supported(psp)) {
1647 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1648 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1649 		bool requires_reflection =
1650 			(psp->xgmi_context.supports_extended_data &&
1651 			 get_extended_data) ||
1652 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1653 				IP_VERSION(13, 0, 6) ||
1654 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1655 				IP_VERSION(13, 0, 14) ||
1656 			amdgpu_sriov_vf(psp->adev);
1657 		bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG ||
1658 			amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev);
1659 
1660 		/* populate the shared output buffer, rather than the cmd input buffer,
1661 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1662 		 * This is required by the xgmi ta implementation of GET_PEER_LINKS.
1663 		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
1664 		 */
1665 		if (ta_port_num_support) {
1666 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1667 
1668 			for (i = 0; i < topology->num_nodes; i++)
1669 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1670 
1671 			link_extend_info_output->num_nodes = topology->num_nodes;
1672 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1673 		} else {
1674 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1675 
1676 			for (i = 0; i < topology->num_nodes; i++)
1677 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1678 
1679 			link_info_output->num_nodes = topology->num_nodes;
1680 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1681 		}
1682 
1683 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1684 		if (ret)
1685 			return ret;
1686 
1687 		for (i = 0; i < topology->num_nodes; i++) {
1688 			uint8_t node_num_links = ta_port_num_support ?
1689 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1690 			/* accumulate num_links on extended data */
1691 			if (get_extended_data) {
1692 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1693 			} else {
1694 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1695 								topology->nodes[i].num_links : node_num_links;
1696 			}
1697 			/* populate the connected port num info if supported and available */
1698 			if (ta_port_num_support && topology->nodes[i].num_links) {
1699 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1700 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1701 			}
1702 
1703 			/* reflect the topology information for bi-directionality */
1704 			if (requires_reflection && topology->nodes[i].num_hops)
1705 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1706 		}
1707 	}
1708 
1709 	return 0;
1710 }
1711 
1712 int psp_xgmi_set_topology_info(struct psp_context *psp,
1713 			       int number_devices,
1714 			       struct psp_xgmi_topology_info *topology)
1715 {
1716 	struct ta_xgmi_shared_memory *xgmi_cmd;
1717 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1718 	int i;
1719 
1720 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1721 		return -EINVAL;
1722 
1723 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1724 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1725 
1726 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1727 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1728 	topology_info_input->num_nodes = number_devices;
1729 
1730 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1731 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1732 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1733 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1734 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1735 	}
1736 
1737 	/* Invoke xgmi ta to set topology information */
1738 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1739 }
1740 
1741 // ras begin
1742 static void psp_ras_ta_check_status(struct psp_context *psp)
1743 {
1744 	struct ta_ras_shared_memory *ras_cmd =
1745 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1746 
1747 	switch (ras_cmd->ras_status) {
1748 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1749 		dev_warn(psp->adev->dev,
1750 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1751 		break;
1752 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1753 		dev_warn(psp->adev->dev,
1754 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1755 		break;
1756 	case TA_RAS_STATUS__SUCCESS:
1757 		break;
1758 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1759 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1760 			dev_warn(psp->adev->dev,
1761 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1762 		break;
1763 	default:
1764 		dev_warn(psp->adev->dev,
1765 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1766 		break;
1767 	}
1768 }
1769 
1770 static int psp_ras_send_cmd(struct psp_context *psp,
1771 		enum ras_command cmd_id, void *in, void *out)
1772 {
1773 	struct ta_ras_shared_memory *ras_cmd;
1774 	uint32_t cmd = cmd_id;
1775 	int ret = 0;
1776 
1777 	if (!in)
1778 		return -EINVAL;
1779 
1780 	mutex_lock(&psp->ras_context.mutex);
1781 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1782 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1783 
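	/* Copy the caller-provided input into the TA shared buffer; the
	 * payload layout depends on the command being issued.
	 */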
1784 	switch (cmd) {
1785 	case TA_RAS_COMMAND__ENABLE_FEATURES:
1786 	case TA_RAS_COMMAND__DISABLE_FEATURES:
1787 		memcpy(&ras_cmd->ras_in_message,
1788 			in, sizeof(ras_cmd->ras_in_message));
1789 		break;
1790 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1791 		memcpy(&ras_cmd->ras_in_message.trigger_error,
1792 			in, sizeof(ras_cmd->ras_in_message.trigger_error));
1793 		break;
1794 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1795 		memcpy(&ras_cmd->ras_in_message.address,
1796 			in, sizeof(ras_cmd->ras_in_message.address));
1797 		break;
1798 	default:
1799 		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1800 		ret = -EINVAL;
1801 		goto err_out;
1802 	}
1803 
1804 	ras_cmd->cmd_id = cmd;
1805 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1806 
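	/* Propagate command-specific results back to the caller; for
	 * QUERY_ADDRESS the TA and response status are also checked.
	 */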
1807 	switch (cmd) {
1808 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1809 		if (!ret && out)
1810 			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1811 		break;
1812 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1813 		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1814 			ret = -EINVAL;
1815 		else if (out)
1816 			memcpy(out,
1817 				&ras_cmd->ras_out_message.address,
1818 				sizeof(ras_cmd->ras_out_message.address));
1819 		break;
1820 	default:
1821 		break;
1822 	}
1823 
1824 err_out:
1825 	mutex_unlock(&psp->ras_context.mutex);
1826 
1827 	return ret;
1828 }
1829 
1830 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1831 {
1832 	struct ta_ras_shared_memory *ras_cmd;
1833 	int ret;
1834 
1835 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1836 
1837 	/*
1838 	 * TODO: bypass the loading in sriov for now
1839 	 */
1840 	if (amdgpu_sriov_vf(psp->adev))
1841 		return 0;
1842 
1843 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1844 
1845 	if (amdgpu_ras_intr_triggered())
1846 		return ret;
1847 
1848 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1849 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1850 		return -EINVAL;
1851 	}
1852 
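	/* Even when the invocation succeeds, check the out-message flags and
	 * the TA status for conditions worth reporting.
	 */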
1853 	if (!ret) {
1854 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1855 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1856 
1857 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1858 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1859 			dev_warn(psp->adev->dev,
1860 				 "RAS internal register access blocked\n");
1861 
1862 		psp_ras_ta_check_status(psp);
1863 	}
1864 
1865 	return ret;
1866 }
1867 
1868 int psp_ras_enable_features(struct psp_context *psp,
1869 		union ta_ras_cmd_input *info, bool enable)
1870 {
1871 	enum ras_command cmd_id;
1872 	int ret;
1873 
1874 	if (!psp->ras_context.context.initialized || !info)
1875 		return -EINVAL;
1876 
1877 	cmd_id = enable ?
1878 		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1879 	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1880 	if (ret)
1881 		return -EINVAL;
1882 
1883 	return 0;
1884 }
1885 
1886 int psp_ras_terminate(struct psp_context *psp)
1887 {
1888 	int ret;
1889 
1890 	/*
1891 	 * TODO: bypass the terminate in sriov for now
1892 	 */
1893 	if (amdgpu_sriov_vf(psp->adev))
1894 		return 0;
1895 
1896 	if (!psp->ras_context.context.initialized)
1897 		return 0;
1898 
1899 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1900 
1901 	psp->ras_context.context.initialized = false;
1902 
1903 	mutex_destroy(&psp->ras_context.mutex);
1904 
1905 	return ret;
1906 }
1907 
1908 int psp_ras_initialize(struct psp_context *psp)
1909 {
1910 	int ret;
1911 	uint32_t boot_cfg = 0xFF;
1912 	struct amdgpu_device *adev = psp->adev;
1913 	struct ta_ras_shared_memory *ras_cmd;
1914 
1915 	/*
1916 	 * TODO: bypass the initialize in sriov for now
1917 	 */
1918 	if (amdgpu_sriov_vf(adev))
1919 		return 0;
1920 
1921 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1922 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1923 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1924 		return 0;
1925 	}
1926 
1927 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1928 		/* query GECC enablement status from boot config
1929 		 * boot_cfg: 1 = GECC is enabled, 0 = GECC is disabled
1930 		 */
1931 		ret = psp_boot_config_get(adev, &boot_cfg);
1932 		if (ret)
1933 			dev_warn(adev->dev, "PSP get boot config failed\n");
1934 
1935 		if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
1936 		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1937 			dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
1938 			dev_warn(adev->dev,
1939 				"To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
1940 		} else {
1941 			if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
1942 				amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1943 				if (boot_cfg == 1) {
1944 					dev_info(adev->dev, "GECC is enabled\n");
1945 				} else {
1946 					/* enable GECC in the next boot cycle if it is disabled
1947 					 * in the boot config, or force-enable GECC if reading the
1948 					 * boot configuration failed
1949 					 */
1950 					ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1951 					if (ret)
1952 						dev_warn(adev->dev, "PSP set boot config failed\n");
1953 					else
1954 						dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1955 				}
1956 			} else {
1957 				if (!boot_cfg) {
1958 					if (!adev->ras_default_ecc_enabled &&
1959 					    amdgpu_ras_enable != 1 &&
1960 					    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1961 						dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
1962 					else
1963 						dev_info(adev->dev, "GECC is disabled\n");
1964 				} else {
1965 					/* disable GECC in the next boot cycle if ras is
1966 					 * disabled by the module parameter amdgpu_ras_enable
1967 					 * and/or amdgpu_ras_mask, or if the boot_config_get
1968 					 * call failed
1969 					 */
1970 					ret = psp_boot_config_set(adev, 0);
1971 					if (ret)
1972 						dev_warn(adev->dev, "PSP set boot config failed\n");
1973 					else
1974 						dev_warn(adev->dev, "GECC will be disabled in the next boot cycle if amdgpu_ras_enable and/or amdgpu_ras_mask is set to 0x0\n");
1975 				}
1976 			}
1977 		}
1978 	}
1979 
1980 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1981 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1982 
1983 	if (!psp->ras_context.context.mem_context.shared_buf) {
1984 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1985 		if (ret)
1986 			return ret;
1987 	}
1988 
1989 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1990 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1991 
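	/* Describe the ASIC configuration to the RAS TA: poison mode, dGPU
	 * mode, XCC mask, disabled channel count, NPS mode and active UMCs.
	 */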
1992 	if (amdgpu_ras_is_poison_mode_supported(adev))
1993 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1994 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1995 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1996 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1997 		adev->gfx.xcc_mask;
1998 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1999 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
2000 		ras_cmd->ras_in_message.init_flags.nps_mode =
2001 			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
2002 	ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
2003 
2004 	ret = psp_ta_load(psp, &psp->ras_context.context);
2005 
2006 	if (!ret && !ras_cmd->ras_status) {
2007 		psp->ras_context.context.initialized = true;
2008 		mutex_init(&psp->ras_context.mutex);
2009 	} else {
2010 		if (ras_cmd->ras_status)
2011 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
2012 
2013 		/* failed to load the RAS TA */
2014 		psp->ras_context.context.initialized = false;
2015 	}
2016 
2017 	return ret;
2018 }
2019 
2020 int psp_ras_trigger_error(struct psp_context *psp,
2021 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
2022 {
2023 	struct amdgpu_device *adev = psp->adev;
2024 	int ret;
2025 	uint32_t dev_mask;
2026 	uint32_t ras_status = 0;
2027 
2028 	if (!psp->ras_context.context.initialized || !info)
2029 		return -EINVAL;
2030 
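	/* Translate the logical instance mask into the device instance mask
	 * understood by the RAS TA for the targeted block.
	 */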
2031 	switch (info->block_id) {
2032 	case TA_RAS_BLOCK__GFX:
2033 		dev_mask = GET_MASK(GC, instance_mask);
2034 		break;
2035 	case TA_RAS_BLOCK__SDMA:
2036 		dev_mask = GET_MASK(SDMA0, instance_mask);
2037 		break;
2038 	case TA_RAS_BLOCK__VCN:
2039 	case TA_RAS_BLOCK__JPEG:
2040 		dev_mask = GET_MASK(VCN, instance_mask);
2041 		break;
2042 	default:
2043 		dev_mask = instance_mask;
2044 		break;
2045 	}
2046 
2047 	/* reuse sub_block_index for backward compatibility */
2048 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
2049 	dev_mask &= AMDGPU_RAS_INST_MASK;
2050 	info->sub_block_index |= dev_mask;
2051 
2052 	ret = psp_ras_send_cmd(psp,
2053 			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
2054 	if (ret)
2055 		return -EINVAL;
2056 
2057 	/* If err_event_athub occurs, the error injection was successful;
2058 	 * however, the return status from the TA is no longer reliable
2059 	 */
2060 	if (amdgpu_ras_intr_triggered())
2061 		return 0;
2062 
2063 	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
2064 		return -EACCES;
2065 	else if (ras_status)
2066 		return -EINVAL;
2067 
2068 	return 0;
2069 }
2070 
2071 int psp_ras_query_address(struct psp_context *psp,
2072 			  struct ta_ras_query_address_input *addr_in,
2073 			  struct ta_ras_query_address_output *addr_out)
2074 {
2075 	int ret;
2076 
2077 	if (!psp->ras_context.context.initialized ||
2078 		!addr_in || !addr_out)
2079 		return -EINVAL;
2080 
2081 	ret = psp_ras_send_cmd(psp,
2082 			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
2083 
2084 	return ret;
2085 }
2086 // ras end
2087 
2088 // HDCP start
2089 static int psp_hdcp_initialize(struct psp_context *psp)
2090 {
2091 	int ret;
2092 
2093 	/*
2094 	 * TODO: bypass the initialize in sriov for now
2095 	 */
2096 	if (amdgpu_sriov_vf(psp->adev))
2097 		return 0;
2098 
2099 	/* bypass hdcp initialization if dmu is harvested */
2100 	if (!amdgpu_device_has_display_hardware(psp->adev))
2101 		return 0;
2102 
2103 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
2104 	    !psp->hdcp_context.context.bin_desc.start_addr) {
2105 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
2106 		return 0;
2107 	}
2108 
2109 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
2110 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2111 
2112 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
2113 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
2114 		if (ret)
2115 			return ret;
2116 	}
2117 
2118 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
2119 	if (!ret) {
2120 		psp->hdcp_context.context.initialized = true;
2121 		mutex_init(&psp->hdcp_context.mutex);
2122 	}
2123 
2124 	return ret;
2125 }
2126 
2127 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2128 {
2129 	/*
2130 	 * TODO: bypass the loading in sriov for now
2131 	 */
2132 	if (amdgpu_sriov_vf(psp->adev))
2133 		return 0;
2134 
2135 	if (!psp->hdcp_context.context.initialized)
2136 		return 0;
2137 
2138 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
2139 }
2140 
2141 static int psp_hdcp_terminate(struct psp_context *psp)
2142 {
2143 	int ret;
2144 
2145 	/*
2146 	 * TODO: bypass the terminate in sriov for now
2147 	 */
2148 	if (amdgpu_sriov_vf(psp->adev))
2149 		return 0;
2150 
2151 	if (!psp->hdcp_context.context.initialized)
2152 		return 0;
2153 
2154 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2155 
2156 	psp->hdcp_context.context.initialized = false;
2157 
2158 	return ret;
2159 }
2160 // HDCP end
2161 
2162 // DTM start
2163 static int psp_dtm_initialize(struct psp_context *psp)
2164 {
2165 	int ret;
2166 
2167 	/*
2168 	 * TODO: bypass the initialize in sriov for now
2169 	 */
2170 	if (amdgpu_sriov_vf(psp->adev))
2171 		return 0;
2172 
2173 	/* bypass dtm initialization if dmu is harvested */
2174 	if (!amdgpu_device_has_display_hardware(psp->adev))
2175 		return 0;
2176 
2177 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
2178 	    !psp->dtm_context.context.bin_desc.start_addr) {
2179 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2180 		return 0;
2181 	}
2182 
2183 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2184 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2185 
2186 	if (!psp->dtm_context.context.mem_context.shared_buf) {
2187 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2188 		if (ret)
2189 			return ret;
2190 	}
2191 
2192 	ret = psp_ta_load(psp, &psp->dtm_context.context);
2193 	if (!ret) {
2194 		psp->dtm_context.context.initialized = true;
2195 		mutex_init(&psp->dtm_context.mutex);
2196 	}
2197 
2198 	return ret;
2199 }
2200 
2201 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2202 {
2203 	/*
2204 	 * TODO: bypass the loading in sriov for now
2205 	 */
2206 	if (amdgpu_sriov_vf(psp->adev))
2207 		return 0;
2208 
2209 	if (!psp->dtm_context.context.initialized)
2210 		return 0;
2211 
2212 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2213 }
2214 
2215 static int psp_dtm_terminate(struct psp_context *psp)
2216 {
2217 	int ret;
2218 
2219 	/*
2220 	 * TODO: bypass the terminate in sriov for now
2221 	 */
2222 	if (amdgpu_sriov_vf(psp->adev))
2223 		return 0;
2224 
2225 	if (!psp->dtm_context.context.initialized)
2226 		return 0;
2227 
2228 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
2229 
2230 	psp->dtm_context.context.initialized = false;
2231 
2232 	return ret;
2233 }
2234 // DTM end
2235 
2236 // RAP start
2237 static int psp_rap_initialize(struct psp_context *psp)
2238 {
2239 	int ret;
2240 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2241 
2242 	/*
2243 	 * TODO: bypass the initialize in sriov for now
2244 	 */
2245 	if (amdgpu_sriov_vf(psp->adev))
2246 		return 0;
2247 
2248 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2249 	    !psp->rap_context.context.bin_desc.start_addr) {
2250 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2251 		return 0;
2252 	}
2253 
2254 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2255 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2256 
2257 	if (!psp->rap_context.context.mem_context.shared_buf) {
2258 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2259 		if (ret)
2260 			return ret;
2261 	}
2262 
2263 	ret = psp_ta_load(psp, &psp->rap_context.context);
2264 	if (!ret) {
2265 		psp->rap_context.context.initialized = true;
2266 		mutex_init(&psp->rap_context.mutex);
2267 	} else
2268 		return ret;
2269 
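	/* Run the RAP TA's own initialization; on failure unload the TA and
	 * release its shared buffer so it is not used later.
	 */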
2270 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2271 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2272 		psp_rap_terminate(psp);
2273 		/* free rap shared memory */
2274 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2275 
2276 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2277 			 ret, status);
2278 
2279 		return ret;
2280 	}
2281 
2282 	return 0;
2283 }
2284 
2285 static int psp_rap_terminate(struct psp_context *psp)
2286 {
2287 	int ret;
2288 
2289 	if (!psp->rap_context.context.initialized)
2290 		return 0;
2291 
2292 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2293 
2294 	psp->rap_context.context.initialized = false;
2295 
2296 	return ret;
2297 }
2298 
2299 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2300 {
2301 	struct ta_rap_shared_memory *rap_cmd;
2302 	int ret = 0;
2303 
2304 	if (!psp->rap_context.context.initialized)
2305 		return 0;
2306 
2307 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2308 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2309 		return -EINVAL;
2310 
2311 	mutex_lock(&psp->rap_context.mutex);
2312 
2313 	rap_cmd = (struct ta_rap_shared_memory *)
2314 		  psp->rap_context.context.mem_context.shared_buf;
2315 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2316 
2317 	rap_cmd->cmd_id = ta_cmd_id;
2318 	rap_cmd->validation_method_id = METHOD_A;
2319 
2320 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2321 	if (ret)
2322 		goto out_unlock;
2323 
2324 	if (status)
2325 		*status = rap_cmd->rap_status;
2326 
2327 out_unlock:
2328 	mutex_unlock(&psp->rap_context.mutex);
2329 
2330 	return ret;
2331 }
2332 // RAP end
2333 
2334 /* securedisplay start */
2335 static int psp_securedisplay_initialize(struct psp_context *psp)
2336 {
2337 	int ret;
2338 	struct ta_securedisplay_cmd *securedisplay_cmd;
2339 
2340 	/*
2341 	 * TODO: bypass the initialize in sriov for now
2342 	 */
2343 	if (amdgpu_sriov_vf(psp->adev))
2344 		return 0;
2345 
2346 	/* bypass securedisplay initialization if dmu is harvested */
2347 	if (!amdgpu_device_has_display_hardware(psp->adev))
2348 		return 0;
2349 
2350 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2351 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2352 		dev_info(psp->adev->dev,
2353 			 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
2354 		return 0;
2355 	}
2356 
2357 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2358 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2359 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2360 
2361 	if (!psp->securedisplay_context.context.initialized) {
2362 		ret = psp_ta_init_shared_buf(psp,
2363 					     &psp->securedisplay_context.context.mem_context);
2364 		if (ret)
2365 			return ret;
2366 	}
2367 
2368 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2369 	if (!ret && !psp->securedisplay_context.context.resp_status) {
2370 		psp->securedisplay_context.context.initialized = true;
2371 		mutex_init(&psp->securedisplay_context.mutex);
2372 	} else {
2373 		/* don't try again */
2374 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2375 		return ret;
2376 	}
2377 
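	/* Query the securedisplay TA once after loading; if the query fails,
	 * tear the TA down and do not retry.
	 */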
2378 	mutex_lock(&psp->securedisplay_context.mutex);
2379 
2380 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2381 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2382 
2383 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2384 
2385 	mutex_unlock(&psp->securedisplay_context.mutex);
2386 
2387 	if (ret) {
2388 		psp_securedisplay_terminate(psp);
2389 		/* free securedisplay shared memory */
2390 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2391 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2392 		return -EINVAL;
2393 	}
2394 
2395 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2396 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2397 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2398 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2399 		/* don't try again */
2400 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2401 	}
2402 
2403 	return 0;
2404 }
2405 
2406 static int psp_securedisplay_terminate(struct psp_context *psp)
2407 {
2408 	int ret;
2409 
2410 	/*
2411 	 * TODO: bypass the terminate in sriov for now
2412 	 */
2413 	if (amdgpu_sriov_vf(psp->adev))
2414 		return 0;
2415 
2416 	if (!psp->securedisplay_context.context.initialized)
2417 		return 0;
2418 
2419 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2420 
2421 	psp->securedisplay_context.context.initialized = false;
2422 
2423 	return ret;
2424 }
2425 
2426 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2427 {
2428 	int ret;
2429 
2430 	if (!psp->securedisplay_context.context.initialized)
2431 		return -EINVAL;
2432 
2433 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2434 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2435 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2436 		return -EINVAL;
2437 
2438 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2439 
2440 	return ret;
2441 }
2442 /* SECUREDISPLAY end */
2443 
2444 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2445 {
2446 	struct psp_context *psp = &adev->psp;
2447 	int ret = 0;
2448 
2449 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2450 		ret = psp->funcs->wait_for_bootloader(psp);
2451 
2452 	return ret;
2453 }
2454 
2455 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2456 {
2457 	if (psp->funcs &&
2458 	    psp->funcs->get_ras_capability) {
2459 		return psp->funcs->get_ras_capability(psp);
2460 	} else {
2461 		return false;
2462 	}
2463 }
2464 
2465 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2466 {
2467 	struct psp_context *psp = &adev->psp;
2468 
2469 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2470 		return false;
2471 
2472 	if (psp->funcs && psp->funcs->is_reload_needed)
2473 		return psp->funcs->is_reload_needed(psp);
2474 
2475 	return false;
2476 }
2477 
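/* Refresh the PSP buffer MC addresses from the frame-buffer aperture; called
 * from psp_hw_start() when amdgpu_virt_xgmi_migrate_enabled() is true.
 */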
2478 static void psp_update_gpu_addresses(struct amdgpu_device *adev)
2479 {
2480 	struct psp_context *psp = &adev->psp;
2481 
2482 	if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
2483 		psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
2484 		psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
2485 		psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
2486 	}
2487 	if (adev->firmware.rbuf && psp->km_ring.ring_mem)
2488 		psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
2489 }
2490 
2491 static int psp_hw_start(struct psp_context *psp)
2492 {
2493 	struct amdgpu_device *adev = psp->adev;
2494 	int ret;
2495 
2496 	if (amdgpu_virt_xgmi_migrate_enabled(adev))
2497 		psp_update_gpu_addresses(adev);
2498 
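	/* On bare metal, step through the PSP bootloader stages in order; each
	 * stage is loaded only when its firmware image is valid and the ASIC
	 * implements the corresponding hook. The whole sequence is skipped
	 * under SR-IOV.
	 */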
2499 	if (!amdgpu_sriov_vf(adev)) {
2500 		if ((is_psp_fw_valid(psp->kdb)) &&
2501 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2502 			ret = psp_bootloader_load_kdb(psp);
2503 			if (ret) {
2504 				dev_err(adev->dev, "PSP load kdb failed!\n");
2505 				return ret;
2506 			}
2507 		}
2508 
2509 		if ((is_psp_fw_valid(psp->spl)) &&
2510 		    (psp->funcs->bootloader_load_spl != NULL)) {
2511 			ret = psp_bootloader_load_spl(psp);
2512 			if (ret) {
2513 				dev_err(adev->dev, "PSP load spl failed!\n");
2514 				return ret;
2515 			}
2516 		}
2517 
2518 		if ((is_psp_fw_valid(psp->sys)) &&
2519 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2520 			ret = psp_bootloader_load_sysdrv(psp);
2521 			if (ret) {
2522 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2523 				return ret;
2524 			}
2525 		}
2526 
2527 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2528 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2529 			ret = psp_bootloader_load_soc_drv(psp);
2530 			if (ret) {
2531 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2532 				return ret;
2533 			}
2534 		}
2535 
2536 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2537 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2538 			ret = psp_bootloader_load_intf_drv(psp);
2539 			if (ret) {
2540 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2541 				return ret;
2542 			}
2543 		}
2544 
2545 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2546 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2547 			ret = psp_bootloader_load_dbg_drv(psp);
2548 			if (ret) {
2549 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2550 				return ret;
2551 			}
2552 		}
2553 
2554 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2555 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2556 			ret = psp_bootloader_load_ras_drv(psp);
2557 			if (ret) {
2558 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2559 				return ret;
2560 			}
2561 		}
2562 
2563 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2564 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2565 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2566 			if (ret) {
2567 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2568 				return ret;
2569 			}
2570 		}
2571 
2572 		if ((is_psp_fw_valid(psp->spdm_drv)) &&
2573 		    (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2574 			ret = psp_bootloader_load_spdm_drv(psp);
2575 			if (ret) {
2576 				dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2577 				return ret;
2578 			}
2579 		}
2580 
2581 		if ((is_psp_fw_valid(psp->sos)) &&
2582 		    (psp->funcs->bootloader_load_sos != NULL)) {
2583 			ret = psp_bootloader_load_sos(psp);
2584 			if (ret) {
2585 				dev_err(adev->dev, "PSP load sos failed!\n");
2586 				return ret;
2587 			}
2588 		}
2589 	}
2590 
2591 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2592 	if (ret) {
2593 		dev_err(adev->dev, "PSP create ring failed!\n");
2594 		return ret;
2595 	}
2596 
2597 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2598 		ret = psp_update_fw_reservation(psp);
2599 		if (ret) {
2600 			dev_err(adev->dev, "update fw reservation failed!\n");
2601 			return ret;
2602 		}
2603 	}
2604 
2605 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2606 		goto skip_pin_bo;
2607 
2608 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2609 		ret = psp_tmr_init(psp);
2610 		if (ret) {
2611 			dev_err(adev->dev, "PSP tmr init failed!\n");
2612 			return ret;
2613 		}
2614 	}
2615 
2616 skip_pin_bo:
2617 	/*
2618 	 * For ASICs with DF Cstate management centralized
2619 	 * to PMFW, TMR setup should be performed after PMFW is
2620 	 * loaded and before other non-PSP firmware is loaded.
2621 	 */
2622 	if (psp->pmfw_centralized_cstate_management) {
2623 		ret = psp_load_smu_fw(psp);
2624 		if (ret)
2625 			return ret;
2626 	}
2627 
2628 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2629 		ret = psp_tmr_load(psp);
2630 		if (ret) {
2631 			dev_err(adev->dev, "PSP load tmr failed!\n");
2632 			return ret;
2633 		}
2634 	}
2635 
2636 	return 0;
2637 }
2638 
2639 int amdgpu_psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2640 			   enum psp_gfx_fw_type *type)
2641 {
2642 	switch (ucode->ucode_id) {
2643 	case AMDGPU_UCODE_ID_CAP:
2644 		*type = GFX_FW_TYPE_CAP;
2645 		break;
2646 	case AMDGPU_UCODE_ID_SDMA0:
2647 		*type = GFX_FW_TYPE_SDMA0;
2648 		break;
2649 	case AMDGPU_UCODE_ID_SDMA1:
2650 		*type = GFX_FW_TYPE_SDMA1;
2651 		break;
2652 	case AMDGPU_UCODE_ID_SDMA2:
2653 		*type = GFX_FW_TYPE_SDMA2;
2654 		break;
2655 	case AMDGPU_UCODE_ID_SDMA3:
2656 		*type = GFX_FW_TYPE_SDMA3;
2657 		break;
2658 	case AMDGPU_UCODE_ID_SDMA4:
2659 		*type = GFX_FW_TYPE_SDMA4;
2660 		break;
2661 	case AMDGPU_UCODE_ID_SDMA5:
2662 		*type = GFX_FW_TYPE_SDMA5;
2663 		break;
2664 	case AMDGPU_UCODE_ID_SDMA6:
2665 		*type = GFX_FW_TYPE_SDMA6;
2666 		break;
2667 	case AMDGPU_UCODE_ID_SDMA7:
2668 		*type = GFX_FW_TYPE_SDMA7;
2669 		break;
2670 	case AMDGPU_UCODE_ID_CP_MES:
2671 		*type = GFX_FW_TYPE_CP_MES;
2672 		break;
2673 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2674 		*type = GFX_FW_TYPE_MES_STACK;
2675 		break;
2676 	case AMDGPU_UCODE_ID_CP_MES1:
2677 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2678 		break;
2679 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2680 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2681 		break;
2682 	case AMDGPU_UCODE_ID_CP_CE:
2683 		*type = GFX_FW_TYPE_CP_CE;
2684 		break;
2685 	case AMDGPU_UCODE_ID_CP_PFP:
2686 		*type = GFX_FW_TYPE_CP_PFP;
2687 		break;
2688 	case AMDGPU_UCODE_ID_CP_ME:
2689 		*type = GFX_FW_TYPE_CP_ME;
2690 		break;
2691 	case AMDGPU_UCODE_ID_CP_MEC1:
2692 		*type = GFX_FW_TYPE_CP_MEC;
2693 		break;
2694 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2695 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2696 		break;
2697 	case AMDGPU_UCODE_ID_CP_MEC2:
2698 		*type = GFX_FW_TYPE_CP_MEC;
2699 		break;
2700 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2701 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2702 		break;
2703 	case AMDGPU_UCODE_ID_RLC_P:
2704 		*type = GFX_FW_TYPE_RLC_P;
2705 		break;
2706 	case AMDGPU_UCODE_ID_RLC_V:
2707 		*type = GFX_FW_TYPE_RLC_V;
2708 		break;
2709 	case AMDGPU_UCODE_ID_RLC_G:
2710 		*type = GFX_FW_TYPE_RLC_G;
2711 		break;
2712 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2713 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2714 		break;
2715 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2716 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2717 		break;
2718 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2719 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2720 		break;
2721 	case AMDGPU_UCODE_ID_RLC_IRAM:
2722 		*type = GFX_FW_TYPE_RLC_IRAM;
2723 		break;
2724 	case AMDGPU_UCODE_ID_RLC_DRAM:
2725 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2726 		break;
2727 	case AMDGPU_UCODE_ID_RLC_IRAM_1:
2728 		*type = GFX_FW_TYPE_RLX6_UCODE_CORE1;
2729 		break;
2730 	case AMDGPU_UCODE_ID_RLC_DRAM_1:
2731 		*type = GFX_FW_TYPE_RLX6_DRAM_BOOT_CORE1;
2732 		break;
2733 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2734 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2735 		break;
2736 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2737 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2738 		break;
2739 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2740 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2741 		break;
2742 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2743 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2744 		break;
2745 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2746 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2747 		break;
2748 	case AMDGPU_UCODE_ID_SMC:
2749 		*type = GFX_FW_TYPE_SMU;
2750 		break;
2751 	case AMDGPU_UCODE_ID_PPTABLE:
2752 		*type = GFX_FW_TYPE_PPTABLE;
2753 		break;
2754 	case AMDGPU_UCODE_ID_UVD:
2755 		*type = GFX_FW_TYPE_UVD;
2756 		break;
2757 	case AMDGPU_UCODE_ID_UVD1:
2758 		*type = GFX_FW_TYPE_UVD1;
2759 		break;
2760 	case AMDGPU_UCODE_ID_VCE:
2761 		*type = GFX_FW_TYPE_VCE;
2762 		break;
2763 	case AMDGPU_UCODE_ID_VCN:
2764 		*type = GFX_FW_TYPE_VCN;
2765 		break;
2766 	case AMDGPU_UCODE_ID_VCN1:
2767 		*type = GFX_FW_TYPE_VCN1;
2768 		break;
2769 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2770 		*type = GFX_FW_TYPE_DMCU_ERAM;
2771 		break;
2772 	case AMDGPU_UCODE_ID_DMCU_INTV:
2773 		*type = GFX_FW_TYPE_DMCU_ISR;
2774 		break;
2775 	case AMDGPU_UCODE_ID_VCN0_RAM:
2776 		*type = GFX_FW_TYPE_VCN0_RAM;
2777 		break;
2778 	case AMDGPU_UCODE_ID_VCN1_RAM:
2779 		*type = GFX_FW_TYPE_VCN1_RAM;
2780 		break;
2781 	case AMDGPU_UCODE_ID_DMCUB:
2782 		*type = GFX_FW_TYPE_DMUB;
2783 		break;
2784 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2785 	case AMDGPU_UCODE_ID_SDMA_RS64:
2786 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2787 		break;
2788 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2789 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2790 		break;
2791 	case AMDGPU_UCODE_ID_IMU_I:
2792 		*type = GFX_FW_TYPE_IMU_I;
2793 		break;
2794 	case AMDGPU_UCODE_ID_IMU_D:
2795 		*type = GFX_FW_TYPE_IMU_D;
2796 		break;
2797 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2798 		*type = GFX_FW_TYPE_RS64_PFP;
2799 		break;
2800 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2801 		*type = GFX_FW_TYPE_RS64_ME;
2802 		break;
2803 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2804 		*type = GFX_FW_TYPE_RS64_MEC;
2805 		break;
2806 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2807 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2808 		break;
2809 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2810 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2811 		break;
2812 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2813 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2814 		break;
2815 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2816 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2817 		break;
2818 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2819 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2820 		break;
2821 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2822 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2823 		break;
2824 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2825 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2826 		break;
2827 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2828 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2829 		break;
2830 	case AMDGPU_UCODE_ID_VPE_CTX:
2831 		*type = GFX_FW_TYPE_VPEC_FW1;
2832 		break;
2833 	case AMDGPU_UCODE_ID_VPE_CTL:
2834 		*type = GFX_FW_TYPE_VPEC_FW2;
2835 		break;
2836 	case AMDGPU_UCODE_ID_VPE:
2837 		*type = GFX_FW_TYPE_VPE;
2838 		break;
2839 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2840 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2841 		break;
2842 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2843 		*type = GFX_FW_TYPE_UMSCH_DATA;
2844 		break;
2845 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2846 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2847 		break;
2848 	case AMDGPU_UCODE_ID_P2S_TABLE:
2849 		*type = GFX_FW_TYPE_P2S_TABLE;
2850 		break;
2851 	case AMDGPU_UCODE_ID_JPEG_RAM:
2852 		*type = GFX_FW_TYPE_JPEG_RAM;
2853 		break;
2854 	case AMDGPU_UCODE_ID_ISP:
2855 		*type = GFX_FW_TYPE_ISP;
2856 		break;
2857 	case AMDGPU_UCODE_ID_MAXIMUM:
2858 	default:
2859 		return -EINVAL;
2860 	}
2861 
2862 	return 0;
2863 }
2864 
2865 static void psp_print_fw_hdr(struct psp_context *psp,
2866 			     struct amdgpu_firmware_info *ucode)
2867 {
2868 	struct amdgpu_device *adev = psp->adev;
2869 	struct common_firmware_header *hdr;
2870 
2871 	switch (ucode->ucode_id) {
2872 	case AMDGPU_UCODE_ID_SDMA0:
2873 	case AMDGPU_UCODE_ID_SDMA1:
2874 	case AMDGPU_UCODE_ID_SDMA2:
2875 	case AMDGPU_UCODE_ID_SDMA3:
2876 	case AMDGPU_UCODE_ID_SDMA4:
2877 	case AMDGPU_UCODE_ID_SDMA5:
2878 	case AMDGPU_UCODE_ID_SDMA6:
2879 	case AMDGPU_UCODE_ID_SDMA7:
2880 		hdr = (struct common_firmware_header *)
2881 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2882 		amdgpu_ucode_print_sdma_hdr(hdr);
2883 		break;
2884 	case AMDGPU_UCODE_ID_CP_CE:
2885 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2886 		amdgpu_ucode_print_gfx_hdr(hdr);
2887 		break;
2888 	case AMDGPU_UCODE_ID_CP_PFP:
2889 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2890 		amdgpu_ucode_print_gfx_hdr(hdr);
2891 		break;
2892 	case AMDGPU_UCODE_ID_CP_ME:
2893 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2894 		amdgpu_ucode_print_gfx_hdr(hdr);
2895 		break;
2896 	case AMDGPU_UCODE_ID_CP_MEC1:
2897 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2898 		amdgpu_ucode_print_gfx_hdr(hdr);
2899 		break;
2900 	case AMDGPU_UCODE_ID_RLC_G:
2901 	case AMDGPU_UCODE_ID_RLC_DRAM_1:
2902 	case AMDGPU_UCODE_ID_RLC_IRAM_1:
2903 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2904 		amdgpu_ucode_print_rlc_hdr(hdr);
2905 		break;
2906 	case AMDGPU_UCODE_ID_SMC:
2907 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2908 		amdgpu_ucode_print_smc_hdr(hdr);
2909 		break;
2910 	default:
2911 		break;
2912 	}
2913 }
2914 
2915 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2916 				       struct amdgpu_firmware_info *ucode,
2917 				       struct psp_gfx_cmd_resp *cmd)
2918 {
2919 	int ret;
2920 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2921 
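	/* Build a GFX_CMD_ID_LOAD_IP_FW command: split the firmware MC address
	 * into low/high halves and resolve the PSP firmware type for this ucode.
	 */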
2922 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2923 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2924 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2925 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2926 
2927 	ret = psp_get_fw_type(psp, ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2928 	if (ret)
2929 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2930 	return ret;
2931 }
2932 
2933 int psp_execute_ip_fw_load(struct psp_context *psp,
2934 			   struct amdgpu_firmware_info *ucode)
2935 {
2936 	int ret = 0;
2937 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2938 
2939 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2940 	if (!ret) {
2941 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2942 					 psp->fence_buf_mc_addr);
2943 	}
2944 
2945 	release_psp_cmd_buf(psp);
2946 
2947 	return ret;
2948 }
2949 
2950 static int psp_load_p2s_table(struct psp_context *psp)
2951 {
2952 	int ret;
2953 	struct amdgpu_device *adev = psp->adev;
2954 	struct amdgpu_firmware_info *ucode =
2955 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2956 
2957 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2958 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2959 		return 0;
2960 
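	/* These IP versions require a minimum SOS firmware version before the
	 * P2S table is loaded; the floor differs between APU and dGPU.
	 */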
2961 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2962 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2963 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2964 								0x0036003C;
2965 		if (psp->sos.fw_version < supp_vers)
2966 			return 0;
2967 	}
2968 
2969 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2970 		return 0;
2971 
2972 	ret = psp_execute_ip_fw_load(psp, ucode);
2973 
2974 	return ret;
2975 }
2976 
2977 static int psp_load_smu_fw(struct psp_context *psp)
2978 {
2979 	int ret;
2980 	struct amdgpu_device *adev = psp->adev;
2981 	struct amdgpu_firmware_info *ucode =
2982 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2983 	struct amdgpu_ras *ras = psp->ras_context.ras;
2984 
2985 	/*
2986 	 * Skip SMU FW reloading when BACO is used for runpm only,
2987 	 * as the SMU is always alive.
2988 	 */
2989 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2990 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2991 		return 0;
2992 
2993 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2994 		return 0;
2995 
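	/* During a RAS-enabled reset on these ASICs, notify MP1 to unload
	 * before the SMU firmware is reloaded.
	 */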
2996 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2997 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2998 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2999 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
3000 		if (ret)
3001 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
3002 	}
3003 
3004 	ret = psp_execute_ip_fw_load(psp, ucode);
3005 
3006 	if (ret)
3007 		dev_err(adev->dev, "PSP load smu failed!\n");
3008 
3009 	return ret;
3010 }
3011 
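/* Decide whether a ucode should be skipped by the PSP loader: no image, the
 * P2S table (loaded separately), SMU firmware when a reload quirk, autoload
 * or PMFW-centralized cstate management applies, an SR-IOV skip policy, or
 * MEC JT firmware covered by RLC autoload.
 */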
3012 static bool fw_load_skip_check(struct psp_context *psp,
3013 			       struct amdgpu_firmware_info *ucode)
3014 {
3015 	if (!ucode->fw || !ucode->ucode_size)
3016 		return true;
3017 
3018 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
3019 		return true;
3020 
3021 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3022 	    (psp_smu_reload_quirk(psp) ||
3023 	     psp->autoload_supported ||
3024 	     psp->pmfw_centralized_cstate_management))
3025 		return true;
3026 
3027 	if (amdgpu_sriov_vf(psp->adev) &&
3028 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
3029 		return true;
3030 
3031 	if (psp->autoload_supported &&
3032 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
3033 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
3034 		/* skip mec JT when autoload is enabled */
3035 		return true;
3036 
3037 	return false;
3038 }
3039 
3040 int psp_load_fw_list(struct psp_context *psp,
3041 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
3042 {
3043 	int ret = 0, i;
3044 	struct amdgpu_firmware_info *ucode;
3045 
3046 	for (i = 0; i < ucode_count; ++i) {
3047 		ucode = ucode_list[i];
3048 		psp_print_fw_hdr(psp, ucode);
3049 		ret = psp_execute_ip_fw_load(psp, ucode);
3050 		if (ret)
3051 			return ret;
3052 	}
3053 	return ret;
3054 }
3055 
3056 static int psp_load_non_psp_fw(struct psp_context *psp)
3057 {
3058 	int i, ret;
3059 	struct amdgpu_firmware_info *ucode;
3060 	struct amdgpu_device *adev = psp->adev;
3061 
3062 	if (psp->autoload_supported &&
3063 	    !psp->pmfw_centralized_cstate_management) {
3064 		ret = psp_load_smu_fw(psp);
3065 		if (ret)
3066 			return ret;
3067 	}
3068 
3069 	/* Load P2S table first if it's available */
3070 	psp_load_p2s_table(psp);
3071 
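	/* Hand every remaining ucode to the PSP, honouring the skip rules in
	 * fw_load_skip_check() and the SMU/SDMA special cases below.
	 */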
3072 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
3073 		ucode = &adev->firmware.ucode[i];
3074 
3075 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3076 		    !fw_load_skip_check(psp, ucode)) {
3077 			ret = psp_load_smu_fw(psp);
3078 			if (ret)
3079 				return ret;
3080 			continue;
3081 		}
3082 
3083 		if (fw_load_skip_check(psp, ucode))
3084 			continue;
3085 
3086 		if (psp->autoload_supported &&
3087 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3088 			     IP_VERSION(11, 0, 7) ||
3089 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3090 			     IP_VERSION(11, 0, 11) ||
3091 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3092 			     IP_VERSION(11, 0, 12) ||
3093 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3094 			     IP_VERSION(15, 0, 8)) &&
3095 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
3096 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
3097 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
3098 			/* PSP only receives one SDMA fw for sienna_cichlid,
3099 			 * as all four SDMA fw images are the same
3100 			 */
3101 			continue;
3102 
3103 		psp_print_fw_hdr(psp, ucode);
3104 
3105 		ret = psp_execute_ip_fw_load(psp, ucode);
3106 		if (ret)
3107 			return ret;
3108 
3109 		/* Start RLC autoload after PSP has received all the GFX firmware */
3110 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
3111 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
3112 			ret = psp_rlc_autoload_start(psp);
3113 			if (ret) {
3114 				dev_err(adev->dev, "Failed to start rlc autoload\n");
3115 				return ret;
3116 			}
3117 		}
3118 	}
3119 
3120 	return 0;
3121 }
3122 
3123 static int psp_load_fw(struct amdgpu_device *adev)
3124 {
3125 	int ret;
3126 	struct psp_context *psp = &adev->psp;
3127 
3128 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3129 		/* should not destroy ring, only stop */
3130 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
3131 	} else {
3132 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
3133 
3134 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
3135 		if (ret) {
3136 			dev_err(adev->dev, "PSP ring init failed!\n");
3137 			goto failed;
3138 		}
3139 	}
3140 
3141 	ret = psp_hw_start(psp);
3142 	if (ret)
3143 		goto failed;
3144 
3145 	ret = psp_load_non_psp_fw(psp);
3146 	if (ret)
3147 		goto failed1;
3148 
3149 	ret = psp_asd_initialize(psp);
3150 	if (ret) {
3151 		dev_err(adev->dev, "PSP load asd failed!\n");
3152 		goto failed1;
3153 	}
3154 
3155 	ret = psp_rl_load(adev);
3156 	if (ret) {
3157 		dev_err(adev->dev, "PSP load RL failed!\n");
3158 		goto failed1;
3159 	}
3160 
3161 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3162 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
3163 			ret = psp_xgmi_initialize(psp, false, true);
3164 			/* Warn about the XGMI session initialization failure
3165 			 * instead of stopping driver initialization
3166 			 */
3167 			if (ret)
3168 				dev_err(psp->adev->dev,
3169 					"XGMI: Failed to initialize XGMI session\n");
3170 		}
3171 	}
3172 
3173 	if (psp->ta_fw) {
3174 		ret = psp_ras_initialize(psp);
3175 		if (ret)
3176 			dev_err(psp->adev->dev,
3177 				"RAS: Failed to initialize RAS\n");
3178 
3179 		ret = psp_hdcp_initialize(psp);
3180 		if (ret)
3181 			dev_err(psp->adev->dev,
3182 				"HDCP: Failed to initialize HDCP\n");
3183 
3184 		ret = psp_dtm_initialize(psp);
3185 		if (ret)
3186 			dev_err(psp->adev->dev,
3187 				"DTM: Failed to initialize DTM\n");
3188 
3189 		ret = psp_rap_initialize(psp);
3190 		if (ret)
3191 			dev_err(psp->adev->dev,
3192 				"RAP: Failed to initialize RAP\n");
3193 
3194 		ret = psp_securedisplay_initialize(psp);
3195 		if (ret)
3196 			dev_err(psp->adev->dev,
3197 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3198 	}
3199 
3200 	return 0;
3201 
3202 failed1:
3203 	psp_free_shared_bufs(psp);
3204 failed:
3205 	/*
3206 	 * all cleanup jobs (xgmi terminate, ras terminate,
3207 	 * ring destroy, cmd/fence/fw buffers destroy,
3208 	 * psp->cmd destroy) are delayed to psp_hw_fini
3209 	 */
3210 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3211 	return ret;
3212 }
3213 
3214 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3215 {
3216 	int ret;
3217 	struct amdgpu_device *adev = ip_block->adev;
3218 
3219 	mutex_lock(&adev->firmware.mutex);
3220 
3221 	ret = amdgpu_ucode_init_bo(adev);
3222 	if (ret)
3223 		goto failed;
3224 
3225 	ret = psp_load_fw(adev);
3226 	if (ret) {
3227 		dev_err(adev->dev, "PSP firmware loading failed\n");
3228 		goto failed;
3229 	}
3230 
3231 	mutex_unlock(&adev->firmware.mutex);
3232 	return 0;
3233 
3234 failed:
3235 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3236 	mutex_unlock(&adev->firmware.mutex);
3237 	return -EINVAL;
3238 }
3239 
3240 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3241 {
3242 	struct amdgpu_device *adev = ip_block->adev;
3243 	struct psp_context *psp = &adev->psp;
3244 
3245 	if (psp->ta_fw) {
3246 		psp_ras_terminate(psp);
3247 		psp_securedisplay_terminate(psp);
3248 		psp_rap_terminate(psp);
3249 		psp_dtm_terminate(psp);
3250 		psp_hdcp_terminate(psp);
3251 
3252 		if (adev->gmc.xgmi.num_physical_nodes > 1)
3253 			psp_xgmi_terminate(psp);
3254 	}
3255 
3256 	psp_asd_terminate(psp);
3257 	psp_tmr_terminate(psp);
3258 
3259 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3260 
3261 	return 0;
3262 }
3263 
3264 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3265 {
3266 	int ret = 0;
3267 	struct amdgpu_device *adev = ip_block->adev;
3268 	struct psp_context *psp = &adev->psp;
3269 
3270 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3271 	    psp->xgmi_context.context.initialized) {
3272 		ret = psp_xgmi_terminate(psp);
3273 		if (ret) {
3274 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3275 			goto out;
3276 		}
3277 	}
3278 
3279 	if (psp->ta_fw) {
3280 		ret = psp_ras_terminate(psp);
3281 		if (ret) {
3282 			dev_err(adev->dev, "Failed to terminate ras ta\n");
3283 			goto out;
3284 		}
3285 		ret = psp_hdcp_terminate(psp);
3286 		if (ret) {
3287 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3288 			goto out;
3289 		}
3290 		ret = psp_dtm_terminate(psp);
3291 		if (ret) {
3292 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3293 			goto out;
3294 		}
3295 		ret = psp_rap_terminate(psp);
3296 		if (ret) {
3297 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3298 			goto out;
3299 		}
3300 		ret = psp_securedisplay_terminate(psp);
3301 		if (ret) {
3302 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3303 			goto out;
3304 		}
3305 	}
3306 
3307 	ret = psp_asd_terminate(psp);
3308 	if (ret) {
3309 		dev_err(adev->dev, "Failed to terminate asd\n");
3310 		goto out;
3311 	}
3312 
3313 	ret = psp_tmr_terminate(psp);
3314 	if (ret) {
3315 		dev_err(adev->dev, "Failed to terminate tmr\n");
3316 		goto out;
3317 	}
3318 
3319 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3320 	if (ret)
3321 		dev_err(adev->dev, "PSP ring stop failed\n");
3322 
3323 out:
3324 	return ret;
3325 }
3326 
3327 static int psp_resume(struct amdgpu_ip_block *ip_block)
3328 {
3329 	int ret;
3330 	struct amdgpu_device *adev = ip_block->adev;
3331 	struct psp_context *psp = &adev->psp;
3332 
3333 	dev_info(adev->dev, "PSP is resuming...\n");
3334 
3335 	if (psp->mem_train_ctx.enable_mem_training) {
3336 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3337 		if (ret) {
3338 			dev_err(adev->dev, "Failed to process memory training!\n");
3339 			return ret;
3340 		}
3341 	}
3342 
3343 	mutex_lock(&adev->firmware.mutex);
3344 
3345 	ret = amdgpu_ucode_init_bo(adev);
3346 	if (ret)
3347 		goto failed;
3348 
3349 	ret = psp_hw_start(psp);
3350 	if (ret)
3351 		goto failed;
3352 
3353 	ret = psp_load_non_psp_fw(psp);
3354 	if (ret)
3355 		goto failed;
3356 
3357 	ret = psp_asd_initialize(psp);
3358 	if (ret) {
3359 		dev_err(adev->dev, "PSP load asd failed!\n");
3360 		goto failed;
3361 	}
3362 
3363 	ret = psp_rl_load(adev);
3364 	if (ret) {
3365 		dev_err(adev->dev, "PSP load RL failed!\n");
3366 		goto failed;
3367 	}
3368 
3369 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3370 		ret = psp_xgmi_initialize(psp, false, true);
3371 		/* Warn about the XGMI session initialization failure
3372 		 * instead of stopping driver initialization
3373 		 */
3374 		if (ret)
3375 			dev_err(psp->adev->dev,
3376 				"XGMI: Failed to initialize XGMI session\n");
3377 	}
3378 
3379 	if (psp->ta_fw) {
3380 		ret = psp_ras_initialize(psp);
3381 		if (ret)
3382 			dev_err(psp->adev->dev,
3383 				"RAS: Failed to initialize RAS\n");
3384 
3385 		ret = psp_hdcp_initialize(psp);
3386 		if (ret)
3387 			dev_err(psp->adev->dev,
3388 				"HDCP: Failed to initialize HDCP\n");
3389 
3390 		ret = psp_dtm_initialize(psp);
3391 		if (ret)
3392 			dev_err(psp->adev->dev,
3393 				"DTM: Failed to initialize DTM\n");
3394 
3395 		ret = psp_rap_initialize(psp);
3396 		if (ret)
3397 			dev_err(psp->adev->dev,
3398 				"RAP: Failed to initialize RAP\n");
3399 
3400 		ret = psp_securedisplay_initialize(psp);
3401 		if (ret)
3402 			dev_err(psp->adev->dev,
3403 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3404 	}
3405 
3406 	mutex_unlock(&adev->firmware.mutex);
3407 
3408 	return 0;
3409 
3410 failed:
3411 	dev_err(adev->dev, "PSP resume failed\n");
3412 	mutex_unlock(&adev->firmware.mutex);
3413 	return ret;
3414 }
3415 
3416 int psp_gpu_reset(struct amdgpu_device *adev)
3417 {
3418 	int ret;
3419 
3420 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3421 		return 0;
3422 
3423 	mutex_lock(&adev->psp.mutex);
3424 	ret = psp_mode1_reset(&adev->psp);
3425 	mutex_unlock(&adev->psp.mutex);
3426 
3427 	return ret;
3428 }
3429 
3430 int psp_rlc_autoload_start(struct psp_context *psp)
3431 {
3432 	int ret;
3433 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3434 
3435 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3436 
3437 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3438 				 psp->fence_buf_mc_addr);
3439 
3440 	release_psp_cmd_buf(psp);
3441 
3442 	return ret;
3443 }
3444 
3445 int psp_ring_cmd_submit(struct psp_context *psp,
3446 			uint64_t cmd_buf_mc_addr,
3447 			uint64_t fence_mc_addr,
3448 			int index)
3449 {
3450 	unsigned int psp_write_ptr_reg = 0;
3451 	struct psp_gfx_rb_frame *write_frame;
3452 	struct psp_ring *ring = &psp->km_ring;
3453 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3454 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3455 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3456 	struct amdgpu_device *adev = psp->adev;
3457 	uint32_t ring_size_dw = ring->ring_size / 4;
3458 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3459 
3460 	/* KM (GPCOM) prepare write pointer */
3461 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3462 
3463 	/* Update KM RB frame pointer to new frame */
3464 	/* write_frame ptr increments by size of rb_frame in bytes */
3465 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3466 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3467 		write_frame = ring_buffer_start;
3468 	else
3469 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3470 	/* Check invalid write_frame ptr address */
3471 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3472 		dev_err(adev->dev,
3473 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3474 			ring_buffer_start, ring_buffer_end, write_frame);
3475 		dev_err(adev->dev,
3476 			"write_frame is pointing to address out of bounds\n");
3477 		return -EINVAL;
3478 	}
3479 
3480 	/* Initialize KM RB frame */
3481 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3482 
3483 	/* Update KM RB frame */
3484 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3485 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3486 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3487 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3488 	write_frame->fence_value = index;
3489 	amdgpu_device_flush_hdp(adev, NULL);
3490 
3491 	/* Update the write Pointer in DWORDs */
3492 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3493 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3494 	return 0;
3495 }
3496 
3497 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3498 {
3499 	struct amdgpu_device *adev = psp->adev;
3500 	const struct psp_firmware_header_v1_0 *asd_hdr;
3501 	int err = 0;
3502 
3503 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3504 				   "amdgpu/%s_asd.bin", chip_name);
3505 	if (err)
3506 		goto out;
3507 
3508 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3509 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3510 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3511 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3512 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3513 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3514 	return 0;
3515 out:
3516 	amdgpu_ucode_release(&adev->psp.asd_fw);
3517 	return err;
3518 }
3519 
3520 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3521 {
3522 	struct amdgpu_device *adev = psp->adev;
3523 	const struct psp_firmware_header_v1_0 *toc_hdr;
3524 	int err = 0;
3525 
3526 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3527 				   "amdgpu/%s_toc.bin", chip_name);
3528 	if (err)
3529 		goto out;
3530 
3531 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3532 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3533 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3534 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3535 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3536 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3537 	return 0;
3538 out:
3539 	amdgpu_ucode_release(&adev->psp.toc_fw);
3540 	return err;
3541 }
3542 
3543 static int parse_sos_bin_descriptor(struct psp_context *psp,
3544 				   const struct psp_fw_bin_desc *desc,
3545 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3546 {
3547 	uint8_t *ucode_start_addr  = NULL;
3548 
3549 	if (!psp || !desc || !sos_hdr)
3550 		return -EINVAL;
3551 
3552 	ucode_start_addr  = (uint8_t *)sos_hdr +
3553 			    le32_to_cpu(desc->offset_bytes) +
3554 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3555 
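	/* Each descriptor identifies one binary packed inside the combined SOS
	 * image; record its version, size and start address.
	 */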
3556 	switch (desc->fw_type) {
3557 	case PSP_FW_TYPE_PSP_SOS:
3558 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3559 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3560 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3561 		psp->sos.start_addr	   = ucode_start_addr;
3562 		break;
3563 	case PSP_FW_TYPE_PSP_SYS_DRV:
3564 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3565 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3566 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3567 		psp->sys.start_addr        = ucode_start_addr;
3568 		break;
3569 	case PSP_FW_TYPE_PSP_KDB:
3570 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3571 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3572 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3573 		psp->kdb.start_addr        = ucode_start_addr;
3574 		break;
3575 	case PSP_FW_TYPE_PSP_TOC:
3576 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3577 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3578 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3579 		psp->toc.start_addr        = ucode_start_addr;
3580 		break;
3581 	case PSP_FW_TYPE_PSP_SPL:
3582 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3583 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3584 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3585 		psp->spl.start_addr        = ucode_start_addr;
3586 		break;
3587 	case PSP_FW_TYPE_PSP_RL:
3588 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3589 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3590 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3591 		psp->rl.start_addr         = ucode_start_addr;
3592 		break;
3593 	case PSP_FW_TYPE_PSP_SOC_DRV:
3594 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3595 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3596 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3597 		psp->soc_drv.start_addr         = ucode_start_addr;
3598 		break;
3599 	case PSP_FW_TYPE_PSP_INTF_DRV:
3600 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3601 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3602 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3603 		psp->intf_drv.start_addr        = ucode_start_addr;
3604 		break;
3605 	case PSP_FW_TYPE_PSP_DBG_DRV:
3606 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3607 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3608 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3609 		psp->dbg_drv.start_addr         = ucode_start_addr;
3610 		break;
3611 	case PSP_FW_TYPE_PSP_RAS_DRV:
3612 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3613 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3614 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3615 		psp->ras_drv.start_addr         = ucode_start_addr;
3616 		break;
3617 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3618 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3619 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3620 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3621 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3622 		break;
3623 	case PSP_FW_TYPE_PSP_SPDM_DRV:
3624 		psp->spdm_drv.fw_version	= le32_to_cpu(desc->fw_version);
3625 		psp->spdm_drv.feature_version	= le32_to_cpu(desc->fw_version);
3626 		psp->spdm_drv.size_bytes	= le32_to_cpu(desc->size_bytes);
3627 		psp->spdm_drv.start_addr	= ucode_start_addr;
3628 		break;
3629 	default:
3630 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3631 		break;
3632 	}
3633 
3634 	return 0;
3635 }
3636 
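/*
 * Set up the base SYS_DRV and SOS binaries from a v1.x SOS header.  MP0
 * 13.0.2 parts without a CPU-connected xGMI link use the alternate (aux)
 * SOS/SYS_DRV images carried in the v1.3 header instead.
 */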
3637 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3638 {
3639 	const struct psp_firmware_header_v1_0 *sos_hdr;
3640 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3641 	uint8_t *ucode_array_start_addr;
3642 
3643 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3644 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3645 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3646 
3647 	if (adev->gmc.xgmi.connected_to_cpu ||
3648 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3649 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3650 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3651 
3652 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3653 		adev->psp.sys.start_addr = ucode_array_start_addr;
3654 
3655 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3656 		adev->psp.sos.start_addr = ucode_array_start_addr +
3657 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3658 	} else {
3659 		/* Load alternate PSP SOS FW */
3660 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3661 
3662 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3663 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3664 
3665 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3666 		adev->psp.sys.start_addr = ucode_array_start_addr +
3667 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3668 
3669 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3670 		adev->psp.sos.start_addr = ucode_array_start_addr +
3671 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3672 	}
3673 
3674 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3675 		dev_warn(adev->dev, "PSP SOS FW not available");
3676 		return -EINVAL;
3677 	}
3678 
3679 	return 0;
3680 }
3681 
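/*
 * Request amdgpu/<chip>_sos.bin (or the _sos_kicker variant when
 * amdgpu_is_kicker_fw() is true) and parse its header: v1.x headers
 * locate the individual images through fixed header fields, while v2.x
 * headers carry an array of packed firmware descriptors.
 */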
3682 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3683 {
3684 	struct amdgpu_device *adev = psp->adev;
3685 	const struct psp_firmware_header_v1_0 *sos_hdr;
3686 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3687 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3688 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3689 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3690 	const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3691 	int fw_index, fw_bin_count, start_index = 0;
3692 	const struct psp_fw_bin_desc *fw_bin;
3693 	uint8_t *ucode_array_start_addr;
3694 	int err = 0;
3695 
3696 	if (amdgpu_is_kicker_fw(adev))
3697 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3698 					   "amdgpu/%s_sos_kicker.bin", chip_name);
3699 	else
3700 		err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3701 					   "amdgpu/%s_sos.bin", chip_name);
3702 	if (err)
3703 		goto out;
3704 
3705 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3706 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3707 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3708 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3709 
3710 	switch (sos_hdr->header.header_version_major) {
3711 	case 1:
3712 		err = psp_init_sos_base_fw(adev);
3713 		if (err)
3714 			goto out;
3715 
3716 		if (sos_hdr->header.header_version_minor == 1) {
3717 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3718 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3719 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3720 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3721 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3722 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3723 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3724 		}
3725 		if (sos_hdr->header.header_version_minor == 2) {
3726 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3727 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3728 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3729 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3730 		}
3731 		if (sos_hdr->header.header_version_minor == 3) {
3732 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3733 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3734 			adev->psp.toc.start_addr = ucode_array_start_addr +
3735 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3736 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3737 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3738 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3739 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3740 			adev->psp.spl.start_addr = ucode_array_start_addr +
3741 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3742 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3743 			adev->psp.rl.start_addr = ucode_array_start_addr +
3744 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3745 		}
3746 		break;
3747 	case 2:
3748 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3749 
3750 		fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3751 
3752 		if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3753 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3754 			err = -EINVAL;
3755 			goto out;
3756 		}
3757 
3758 		if (sos_hdr_v2_0->header.header_version_minor == 1) {
3759 			sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3760 
3761 			fw_bin = sos_hdr_v2_1->psp_fw_bin;
3762 
3763 			if (psp_is_aux_sos_load_required(psp))
3764 				start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3765 			else
3766 				fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3767 
3768 		} else {
3769 			fw_bin = sos_hdr_v2_0->psp_fw_bin;
3770 		}
3771 
3772 		for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3773 			err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3774 						       sos_hdr_v2_0);
3775 			if (err)
3776 				goto out;
3777 		}
3778 		break;
3779 	default:
3780 		dev_err(adev->dev,
3781 			"unsupported psp sos firmware\n");
3782 		err = -EINVAL;
3783 		goto out;
3784 	}
3785 
3786 	return 0;
3787 out:
3788 	amdgpu_ucode_release(&adev->psp.sos_fw);
3789 
3790 	return err;
3791 }
3792 
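/*
 * Decide whether a packed TA binary applies to this device.  Only the
 * XGMI/XGMI_AUX pair needs filtering today; every other TA type is
 * accepted.
 */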
3793 static bool is_ta_fw_applicable(struct psp_context *psp,
3794 				const struct psp_fw_bin_desc *desc)
3795 {
3796 	struct amdgpu_device *adev = psp->adev;
3797 	uint32_t fw_version;
3798 
3799 	switch (desc->fw_type) {
3800 	case TA_FW_TYPE_PSP_XGMI:
3801 	case TA_FW_TYPE_PSP_XGMI_AUX:
3802 		/* For now, the AUX TA only exists in the 13.0.6 TA bin,
3803 		 * starting from v20.00.0x.14.
3804 		 */
3805 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3806 		    IP_VERSION(13, 0, 6)) {
3807 			fw_version = le32_to_cpu(desc->fw_version);
3808 
3809 			if (adev->flags & AMD_IS_APU &&
3810 			    (fw_version & 0xff) >= 0x14)
3811 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3812 			else
3813 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3814 		}
3815 		break;
3816 	default:
3817 		break;
3818 	}
3819 
3820 	return true;
3821 }
3822 
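/*
 * Map a single packed TA descriptor onto the matching TA context (ASD,
 * XGMI, RAS, HDCP, DTM, RAP or SECUREDISPLAY), skipping descriptors that
 * do not apply to this device.
 */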
3823 static int parse_ta_bin_descriptor(struct psp_context *psp,
3824 				   const struct psp_fw_bin_desc *desc,
3825 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3826 {
3827 	uint8_t *ucode_start_addr  = NULL;
3828 
3829 	if (!psp || !desc || !ta_hdr)
3830 		return -EINVAL;
3831 
3832 	if (!is_ta_fw_applicable(psp, desc))
3833 		return 0;
3834 
3835 	ucode_start_addr  = (uint8_t *)ta_hdr +
3836 			    le32_to_cpu(desc->offset_bytes) +
3837 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3838 
3839 	switch (desc->fw_type) {
3840 	case TA_FW_TYPE_PSP_ASD:
3841 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3842 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3843 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3844 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3845 		break;
3846 	case TA_FW_TYPE_PSP_XGMI:
3847 	case TA_FW_TYPE_PSP_XGMI_AUX:
3848 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3849 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3850 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3851 		break;
3852 	case TA_FW_TYPE_PSP_RAS:
3853 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3854 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3855 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3856 		break;
3857 	case TA_FW_TYPE_PSP_HDCP:
3858 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3859 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3860 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3861 		break;
3862 	case TA_FW_TYPE_PSP_DTM:
3863 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3864 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3865 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3866 		break;
3867 	case TA_FW_TYPE_PSP_RAP:
3868 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3869 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3870 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3871 		break;
3872 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3873 		psp->securedisplay_context.context.bin_desc.fw_version =
3874 			le32_to_cpu(desc->fw_version);
3875 		psp->securedisplay_context.context.bin_desc.size_bytes =
3876 			le32_to_cpu(desc->size_bytes);
3877 		psp->securedisplay_context.context.bin_desc.start_addr =
3878 			ucode_start_addr;
3879 		break;
3880 	default:
3881 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3882 		break;
3883 	}
3884 
3885 	return 0;
3886 }
3887 
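/*
 * A v1.0 TA header describes the XGMI, RAS, HDCP, DTM and SECUREDISPLAY
 * binaries through fixed fields; start addresses are derived from the
 * ucode array offset plus the per-TA offsets recorded in the header.
 */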
3888 static int parse_ta_v1_microcode(struct psp_context *psp)
3889 {
3890 	const struct ta_firmware_header_v1_0 *ta_hdr;
3891 	struct amdgpu_device *adev = psp->adev;
3892 
3893 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3894 
3895 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3896 		return -EINVAL;
3897 
3898 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3899 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3900 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3901 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3902 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3903 		(uint8_t *)ta_hdr +
3904 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3905 
3906 	adev->psp.ras_context.context.bin_desc.fw_version =
3907 		le32_to_cpu(ta_hdr->ras.fw_version);
3908 	adev->psp.ras_context.context.bin_desc.size_bytes =
3909 		le32_to_cpu(ta_hdr->ras.size_bytes);
3910 	adev->psp.ras_context.context.bin_desc.start_addr =
3911 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3912 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3913 
3914 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3915 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3916 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3917 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3918 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3919 		(uint8_t *)ta_hdr +
3920 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3921 
3922 	adev->psp.dtm_context.context.bin_desc.fw_version =
3923 		le32_to_cpu(ta_hdr->dtm.fw_version);
3924 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3925 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3926 	adev->psp.dtm_context.context.bin_desc.start_addr =
3927 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3928 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3929 
3930 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3931 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3932 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3933 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3934 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3935 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3936 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3937 
3938 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3939 
3940 	return 0;
3941 }
3942 
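/*
 * A v2.0 TA header carries a variable number of packed TA descriptors,
 * bounded by UCODE_MAX_PSP_PACKAGING.
 */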
3943 static int parse_ta_v2_microcode(struct psp_context *psp)
3944 {
3945 	const struct ta_firmware_header_v2_0 *ta_hdr;
3946 	struct amdgpu_device *adev = psp->adev;
3947 	int err = 0;
3948 	int ta_index = 0;
3949 
3950 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3951 
3952 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3953 		return -EINVAL;
3954 
3955 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3956 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3957 		return -EINVAL;
3958 	}
3959 
3960 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3961 		err = parse_ta_bin_descriptor(psp,
3962 					      &ta_hdr->ta_fw_bin[ta_index],
3963 					      ta_hdr);
3964 		if (err)
3965 			return err;
3966 	}
3967 
3968 	return 0;
3969 }
3970 
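/*
 * Request amdgpu/<chip>_ta.bin (or the _ta_kicker variant when
 * amdgpu_is_kicker_fw() is true) and parse it according to the header
 * major version.
 */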
3971 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3972 {
3973 	const struct common_firmware_header *hdr;
3974 	struct amdgpu_device *adev = psp->adev;
3975 	int err;
3976 
3977 	if (amdgpu_is_kicker_fw(adev))
3978 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3979 					   "amdgpu/%s_ta_kicker.bin", chip_name);
3980 	else
3981 		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3982 					   "amdgpu/%s_ta.bin", chip_name);
3983 	if (err)
3984 		return err;
3985 
3986 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3987 	switch (le16_to_cpu(hdr->header_version_major)) {
3988 	case 1:
3989 		err = parse_ta_v1_microcode(psp);
3990 		break;
3991 	case 2:
3992 		err = parse_ta_v2_microcode(psp);
3993 		break;
3994 	default:
3995 		dev_err(adev->dev, "unsupported TA header version\n");
3996 		err = -EINVAL;
3997 	}
3998 
3999 	if (err)
4000 		amdgpu_ucode_release(&adev->psp.ta_fw);
4001 
4002 	return err;
4003 }
4004 
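/*
 * CAP firmware is only used under SRIOV and is optional: a missing
 * amdgpu/<chip>_cap.bin is not treated as an error.
 */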
4005 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
4006 {
4007 	struct amdgpu_device *adev = psp->adev;
4008 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
4009 	struct amdgpu_firmware_info *info = NULL;
4010 	int err = 0;
4011 
4012 	if (!amdgpu_sriov_vf(adev)) {
4013 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
4014 		return -EINVAL;
4015 	}
4016 
4017 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
4018 				   "amdgpu/%s_cap.bin", chip_name);
4019 	if (err) {
4020 		if (err == -ENODEV) {
4021 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
4022 			err = 0;
4023 		} else {
4024 			dev_err(adev->dev, "failed to initialize cap microcode\n");
4025 		}
4026 		goto out;
4027 	}
4028 
4029 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
4030 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
4031 	info->fw = adev->psp.cap_fw;
4032 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
4033 		adev->psp.cap_fw->data;
4034 	adev->firmware.fw_size += ALIGN(
4035 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
4036 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
4037 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
4038 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
4039 
4040 	return 0;
4041 
4042 out:
4043 	amdgpu_ucode_release(&adev->psp.cap_fw);
4044 	return err;
4045 }
4046 
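/*
 * Send a GFX_CMD_ID_CONFIG_SQ_PERFMON command for one XCP.  Only
 * supported on bare-metal MP0 13.0.6; under SRIOV the call is a no-op.
 */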
4047 int psp_config_sq_perfmon(struct psp_context *psp,
4048 		uint32_t xcp_id, bool core_override_enable,
4049 		bool reg_override_enable, bool perfmon_override_enable)
4050 {
4051 	int ret;
4052 
4053 	if (amdgpu_sriov_vf(psp->adev))
4054 		return 0;
4055 
4056 	if (xcp_id > MAX_XCP) {
4057 		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
4058 		return -EINVAL;
4059 	}
4060 
4061 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
4062 		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
4063 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
4064 		return -EINVAL;
4065 	}
4066 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
4067 
4068 	cmd->cmd_id	=	GFX_CMD_ID_CONFIG_SQ_PERFMON;
4069 	cmd->cmd.config_sq_perfmon.gfx_xcp_mask	=	BIT_MASK(xcp_id);
4070 	cmd->cmd.config_sq_perfmon.core_override	=	core_override_enable;
4071 	cmd->cmd.config_sq_perfmon.reg_override	=	reg_override_enable;
4072 	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
4073 
4074 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
4075 	if (ret)
4076 		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
4077 			xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
4078 
4079 	release_psp_cmd_buf(psp);
4080 	return ret;
4081 }
4082 
4083 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
4084 					enum amd_clockgating_state state)
4085 {
4086 	return 0;
4087 }
4088 
4089 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
4090 				     enum amd_powergating_state state)
4091 {
4092 	return 0;
4093 }
4094 
4095 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
4096 					 struct device_attribute *attr,
4097 					 char *buf)
4098 {
4099 	struct drm_device *ddev = dev_get_drvdata(dev);
4100 	struct amdgpu_device *adev = drm_to_adev(ddev);
4101 	struct amdgpu_ip_block *ip_block;
4102 	uint32_t fw_ver;
4103 	int ret;
4104 
4105 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4106 	if (!ip_block || !ip_block->status.late_initialized) {
4107 		dev_info(adev->dev, "PSP block is not ready yet.\n");
4108 		return -EBUSY;
4109 	}
4110 
4111 	mutex_lock(&adev->psp.mutex);
4112 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
4113 	mutex_unlock(&adev->psp.mutex);
4114 
4115 	if (ret) {
4116 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
4117 		return ret;
4118 	}
4119 
4120 	return sysfs_emit(buf, "%x\n", fw_ver);
4121 }
4122 
4123 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
4124 						       struct device_attribute *attr,
4125 						       const char *buf,
4126 						       size_t count)
4127 {
4128 	struct drm_device *ddev = dev_get_drvdata(dev);
4129 	struct amdgpu_device *adev = drm_to_adev(ddev);
4130 	int ret, idx;
4131 	const struct firmware *usbc_pd_fw;
4132 	struct amdgpu_bo *fw_buf_bo = NULL;
4133 	uint64_t fw_pri_mc_addr;
4134 	void *fw_pri_cpu_addr;
4135 	struct amdgpu_ip_block *ip_block;
4136 
4137 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4138 	if (!ip_block || !ip_block->status.late_initialized) {
4139 		dev_err(adev->dev, "PSP block is not ready yet.\n");
4140 		return -EBUSY;
4141 	}
4142 
4143 	if (!drm_dev_enter(ddev, &idx))
4144 		return -ENODEV;
4145 
4146 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
4147 				   "amdgpu/%s", buf);
4148 	if (ret)
4149 		goto fail;
4150 
4151 	/* LFB address which is aligned to 1MB boundary per PSP request */
4152 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
4153 				      AMDGPU_GEM_DOMAIN_VRAM |
4154 				      AMDGPU_GEM_DOMAIN_GTT,
4155 				      &fw_buf_bo, &fw_pri_mc_addr,
4156 				      &fw_pri_cpu_addr);
4157 	if (ret)
4158 		goto rel_buf;
4159 
4160 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
4161 
4162 	mutex_lock(&adev->psp.mutex);
4163 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
4164 	mutex_unlock(&adev->psp.mutex);
4165 
4166 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4167 
4168 rel_buf:
4169 	amdgpu_ucode_release(&usbc_pd_fw);
4170 fail:
4171 	if (ret) {
4172 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
4173 		count = ret;
4174 	}
4175 
4176 	drm_dev_exit(idx);
4177 	return count;
4178 }
4179 
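/*
 * Copy a firmware image into the PSP private firmware buffer.  The copy
 * is skipped when the DRM device has been unplugged (drm_dev_enter()
 * fails).
 */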
4180 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
4181 {
4182 	int idx;
4183 
4184 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
4185 		return;
4186 
4187 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
4188 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
4189 
4190 	drm_dev_exit(idx);
4191 }
4192 
4193 /**
4194  * DOC: usbc_pd_fw
4195  * Reading from this file will retrieve the USB-C PD firmware version. Writing
4196  * a firmware file name (under the amdgpu/ directory) will trigger the update.
4197  */
4198 static DEVICE_ATTR(usbc_pd_fw, 0644,
4199 		   psp_usbc_pd_fw_sysfs_read,
4200 		   psp_usbc_pd_fw_sysfs_write);
4201 
4202 int is_psp_fw_valid(struct psp_bin_desc bin)
4203 {
4204 	return bin.size_bytes;
4205 }
4206 
4207 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4208 					const struct bin_attribute *bin_attr,
4209 					char *buffer, loff_t pos, size_t count)
4210 {
4211 	struct device *dev = kobj_to_dev(kobj);
4212 	struct drm_device *ddev = dev_get_drvdata(dev);
4213 	struct amdgpu_device *adev = drm_to_adev(ddev);
4214 
4215 	adev->psp.vbflash_done = false;
4216 
4217 	/* Safeguard against memory drain */
4218 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
4219 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4220 		kvfree(adev->psp.vbflash_tmp_buf);
4221 		adev->psp.vbflash_tmp_buf = NULL;
4222 		adev->psp.vbflash_image_size = 0;
4223 		return -ENOMEM;
4224 	}
4225 
4226 	/* TODO Just allocate max for now and optimize to realloc later if needed */
4227 	if (!adev->psp.vbflash_tmp_buf) {
4228 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4229 		if (!adev->psp.vbflash_tmp_buf)
4230 			return -ENOMEM;
4231 	}
4232 
4233 	mutex_lock(&adev->psp.mutex);
4234 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4235 	adev->psp.vbflash_image_size += count;
4236 	mutex_unlock(&adev->psp.mutex);
4237 
4238 	dev_dbg(adev->dev, "IFWI staged for update\n");
4239 
4240 	return count;
4241 }
4242 
4243 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4244 				       const struct bin_attribute *bin_attr, char *buffer,
4245 				       loff_t pos, size_t count)
4246 {
4247 	struct device *dev = kobj_to_dev(kobj);
4248 	struct drm_device *ddev = dev_get_drvdata(dev);
4249 	struct amdgpu_device *adev = drm_to_adev(ddev);
4250 	struct amdgpu_bo *fw_buf_bo = NULL;
4251 	uint64_t fw_pri_mc_addr;
4252 	void *fw_pri_cpu_addr;
4253 	int ret;
4254 
4255 	if (adev->psp.vbflash_image_size == 0)
4256 		return -EINVAL;
4257 
4258 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4259 
4260 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4261 					AMDGPU_GPU_PAGE_SIZE,
4262 					AMDGPU_GEM_DOMAIN_VRAM,
4263 					&fw_buf_bo,
4264 					&fw_pri_mc_addr,
4265 					&fw_pri_cpu_addr);
4266 	if (ret)
4267 		goto rel_buf;
4268 
4269 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4270 
4271 	mutex_lock(&adev->psp.mutex);
4272 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4273 	mutex_unlock(&adev->psp.mutex);
4274 
4275 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4276 
4277 rel_buf:
4278 	kvfree(adev->psp.vbflash_tmp_buf);
4279 	adev->psp.vbflash_tmp_buf = NULL;
4280 	adev->psp.vbflash_image_size = 0;
4281 
4282 	if (ret) {
4283 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4284 		return ret;
4285 	}
4286 
4287 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4288 	return 0;
4289 }
4290 
4291 /**
4292  * DOC: psp_vbflash
4293  * Writing to this file will stage an IFWI for update. Reading from this file
4294  * will trigger the update process.
4295  */
4296 static const struct bin_attribute psp_vbflash_bin_attr = {
4297 	.attr = {.name = "psp_vbflash", .mode = 0660},
4298 	.size = 0,
4299 	.write = amdgpu_psp_vbflash_write,
4300 	.read = amdgpu_psp_vbflash_read,
4301 };
4302 
4303 /**
4304  * DOC: psp_vbflash_status
4305  * The status of the flash process.
4306  * 0: IFWI flash not complete.
4307  * 1: IFWI flash complete.
4308  */
4309 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4310 					 struct device_attribute *attr,
4311 					 char *buf)
4312 {
4313 	struct drm_device *ddev = dev_get_drvdata(dev);
4314 	struct amdgpu_device *adev = drm_to_adev(ddev);
4315 	uint32_t vbflash_status;
4316 
4317 	vbflash_status = psp_vbflash_status(&adev->psp);
4318 	if (!adev->psp.vbflash_done)
4319 		vbflash_status = 0;
4320 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4321 		vbflash_status = 1;
4322 
4323 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
4324 }
4325 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
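/*
 * Typical IFWI update flow from user space (a sketch only; the sysfs
 * path below assumes the usual DRM device attribute location and card
 * index, which may differ on a given system):
 *
 *   cat ifwi.bin > /sys/class/drm/card0/device/psp_vbflash      # stage
 *   cat /sys/class/drm/card0/device/psp_vbflash                 # flash
 *   cat /sys/class/drm/card0/device/psp_vbflash_status          # poll
 */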
4326 
4327 static const struct bin_attribute *const bin_flash_attrs[] = {
4328 	&psp_vbflash_bin_attr,
4329 	NULL
4330 };
4331 
4332 static struct attribute *flash_attrs[] = {
4333 	&dev_attr_psp_vbflash_status.attr,
4334 	&dev_attr_usbc_pd_fw.attr,
4335 	NULL
4336 };
4337 
4338 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4339 {
4340 	struct device *dev = kobj_to_dev(kobj);
4341 	struct drm_device *ddev = dev_get_drvdata(dev);
4342 	struct amdgpu_device *adev = drm_to_adev(ddev);
4343 
4344 	if (attr == &dev_attr_usbc_pd_fw.attr)
4345 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
4346 
4347 	return adev->psp.sup_ifwi_up ? 0440 : 0;
4348 }
4349 
4350 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4351 						const struct bin_attribute *attr,
4352 						int idx)
4353 {
4354 	struct device *dev = kobj_to_dev(kobj);
4355 	struct drm_device *ddev = dev_get_drvdata(dev);
4356 	struct amdgpu_device *adev = drm_to_adev(ddev);
4357 
4358 	return adev->psp.sup_ifwi_up ? 0660 : 0;
4359 }
4360 
4361 const struct attribute_group amdgpu_flash_attr_group = {
4362 	.attrs = flash_attrs,
4363 	.bin_attrs = bin_flash_attrs,
4364 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4365 	.is_visible = amdgpu_flash_attr_is_visible,
4366 };
4367 
4368 #if defined(CONFIG_DEBUG_FS)
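/*
 * debugfs interface for dumping the SPIROM (IFWI) contents: open()
 * allocates a GTT buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 and asks the
 * PSP to dump the SPIROM into it, read() copies from that buffer to
 * user space, and release() frees it.  Only one opener is allowed at a
 * time.
 */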
4369 static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
4370 {
4371 	struct amdgpu_device *adev = filp->f_inode->i_private;
4372 	struct spirom_bo *bo_triplet;
4373 	int ret;
4374 
4375 	/* serialize the open() file calling */
4376 	/* serialize open() calls on this file */
4377 		return -EBUSY;
4378 
4379 	/*
4380 	 * Make sure only one userspace process is dumping at a time, so that
4381 	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed
4382 	 * and a second open() is rejected while another process is still
4383 	 * reading or releasing.  This also removes the need for a mutex in
4384 	 * the read() and release() callbacks.
4385 	 */
4386 	if (adev->psp.spirom_dump_trip) {
4387 		mutex_unlock(&adev->psp.mutex);
4388 		return -EBUSY;
4389 	}
4390 
4391 	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
4392 	if (!bo_triplet) {
4393 		mutex_unlock(&adev->psp.mutex);
4394 		return -ENOMEM;
4395 	}
4396 
4397 	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
4398 				      AMDGPU_GPU_PAGE_SIZE,
4399 				      AMDGPU_GEM_DOMAIN_GTT,
4400 				      &bo_triplet->bo,
4401 				      &bo_triplet->mc_addr,
4402 				      &bo_triplet->cpu_addr);
4403 	if (ret)
4404 		goto rel_trip;
4405 
4406 	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
4407 	if (ret)
4408 		goto rel_bo;
4409 
4410 	adev->psp.spirom_dump_trip = bo_triplet;
4411 	mutex_unlock(&adev->psp.mutex);
4412 	return 0;
4413 rel_bo:
4414 	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4415 			      &bo_triplet->cpu_addr);
4416 rel_trip:
4417 	kfree(bo_triplet);
4418 	mutex_unlock(&adev->psp.mutex);
4419 	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
4420 	return ret;
4421 }
4422 
4423 static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
4424 					    loff_t *pos)
4425 {
4426 	struct amdgpu_device *adev = filp->f_inode->i_private;
4427 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4428 
4429 	if (!bo_triplet)
4430 		return -EINVAL;
4431 
4432 	return simple_read_from_buffer(buf,
4433 				       size,
4434 				       pos, bo_triplet->cpu_addr,
4435 				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4436 }
4437 
4438 static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
4439 {
4440 	struct amdgpu_device *adev = filp->f_inode->i_private;
4441 	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4442 
4443 	if (bo_triplet) {
4444 		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4445 				      &bo_triplet->cpu_addr);
4446 		kfree(bo_triplet);
4447 	}
4448 
4449 	adev->psp.spirom_dump_trip = NULL;
4450 	return 0;
4451 }
4452 
4453 static const struct file_operations psp_dump_spirom_debugfs_ops = {
4454 	.owner = THIS_MODULE,
4455 	.open = psp_read_spirom_debugfs_open,
4456 	.read = psp_read_spirom_debugfs_read,
4457 	.release = psp_read_spirom_debugfs_release,
4458 	.llseek = default_llseek,
4459 };
4460 #endif
4461 
4462 void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
4463 {
4464 #if defined(CONFIG_DEBUG_FS)
4465 	struct drm_minor *minor = adev_to_drm(adev)->primary;
4466 
4467 	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
4468 				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4469 #endif
4470 }
4471 
4472 const struct amd_ip_funcs psp_ip_funcs = {
4473 	.name = "psp",
4474 	.early_init = psp_early_init,
4475 	.sw_init = psp_sw_init,
4476 	.sw_fini = psp_sw_fini,
4477 	.hw_init = psp_hw_init,
4478 	.hw_fini = psp_hw_fini,
4479 	.suspend = psp_suspend,
4480 	.resume = psp_resume,
4481 	.set_clockgating_state = psp_set_clockgating_state,
4482 	.set_powergating_state = psp_set_powergating_state,
4483 };
4484 
4485 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4486 	.type = AMD_IP_BLOCK_TYPE_PSP,
4487 	.major = 3,
4488 	.minor = 1,
4489 	.rev = 0,
4490 	.funcs = &psp_ip_funcs,
4491 };
4492 
4493 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4494 	.type = AMD_IP_BLOCK_TYPE_PSP,
4495 	.major = 10,
4496 	.minor = 0,
4497 	.rev = 0,
4498 	.funcs = &psp_ip_funcs,
4499 };
4500 
4501 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4502 	.type = AMD_IP_BLOCK_TYPE_PSP,
4503 	.major = 11,
4504 	.minor = 0,
4505 	.rev = 0,
4506 	.funcs = &psp_ip_funcs,
4507 };
4508 
4509 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4510 	.type = AMD_IP_BLOCK_TYPE_PSP,
4511 	.major = 11,
4512 	.minor = 0,
4513 	.rev = 8,
4514 	.funcs = &psp_ip_funcs,
4515 };
4516 
4517 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4518 	.type = AMD_IP_BLOCK_TYPE_PSP,
4519 	.major = 12,
4520 	.minor = 0,
4521 	.rev = 0,
4522 	.funcs = &psp_ip_funcs,
4523 };
4524 
4525 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4526 	.type = AMD_IP_BLOCK_TYPE_PSP,
4527 	.major = 13,
4528 	.minor = 0,
4529 	.rev = 0,
4530 	.funcs = &psp_ip_funcs,
4531 };
4532 
4533 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4534 	.type = AMD_IP_BLOCK_TYPE_PSP,
4535 	.major = 13,
4536 	.minor = 0,
4537 	.rev = 4,
4538 	.funcs = &psp_ip_funcs,
4539 };
4540 
4541 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4542 	.type = AMD_IP_BLOCK_TYPE_PSP,
4543 	.major = 14,
4544 	.minor = 0,
4545 	.rev = 0,
4546 	.funcs = &psp_ip_funcs,
4547 };
4548 
4549 const struct amdgpu_ip_block_version psp_v15_0_8_ip_block = {
4550 	.type = AMD_IP_BLOCK_TYPE_PSP,
4551 	.major = 15,
4552 	.minor = 0,
4553 	.rev = 8,
4554 	.funcs = &psp_ip_funcs,
4555 };
4556