xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision c909a49128a31bced8cfbd2dfb0a4fe56e01a6d0)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46 
47 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)
48 
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52 
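/* Create the PSP kernel-mode (KM) ring: allocate a page-aligned 4K buffer in
 * VRAM (falling back to GTT) that backs the PSP command ring.
 */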
53 static int psp_ring_init(struct psp_context *psp,
54 			 enum psp_ring_type ring_type)
55 {
56 	int ret = 0;
57 	struct psp_ring *ring;
58 	struct amdgpu_device *adev = psp->adev;
59 
60 	ring = &psp->km_ring;
61 
62 	ring->ring_type = ring_type;
63 
64 	/* allocate a 4K page of local frame buffer memory for the ring */
65 	ring->ring_size = 0x1000;
66 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 				      AMDGPU_GEM_DOMAIN_VRAM |
68 				      AMDGPU_GEM_DOMAIN_GTT,
69 				      &adev->firmware.rbuf,
70 				      &ring->ring_mem_mc_addr,
71 				      (void **)&ring->ring_mem);
72 	if (ret) {
73 		ring->ring_size = 0;
74 		return ret;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * Since DF Cstate management is centralized in the PMFW, the firmware
82  * loading sequence is updated as below:
83  *   - Load KDB
84  *   - Load SYS_DRV
85  *   - Load tOS
86  *   - Load PMFW
87  *   - Setup TMR
88  *   - Load other non-psp fw
89  *   - Load ASD
90  *   - Load XGMI/RAS/HDCP/DTM TA if any
91  *
92  * This new sequence is required for
93  *   - Arcturus and onwards
94  */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 	struct amdgpu_device *adev = psp->adev;
98 
99 	if (amdgpu_sriov_vf(adev)) {
100 		psp->pmfw_centralized_cstate_management = false;
101 		return;
102 	}
103 
104 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 	case IP_VERSION(11, 0, 0):
106 	case IP_VERSION(11, 0, 4):
107 	case IP_VERSION(11, 0, 5):
108 	case IP_VERSION(11, 0, 7):
109 	case IP_VERSION(11, 0, 9):
110 	case IP_VERSION(11, 0, 11):
111 	case IP_VERSION(11, 0, 12):
112 	case IP_VERSION(11, 0, 13):
113 	case IP_VERSION(13, 0, 0):
114 	case IP_VERSION(13, 0, 2):
115 	case IP_VERSION(13, 0, 7):
116 		psp->pmfw_centralized_cstate_management = true;
117 		break;
118 	default:
119 		psp->pmfw_centralized_cstate_management = false;
120 		break;
121 	}
122 }
123 
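/* Under SRIOV, select the VF autoload ucode ID and request the CAP (and,
 * where applicable, TA) microcode for the detected MP0 IP version.
 */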
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 	struct amdgpu_device *adev = psp->adev;
127 	char ucode_prefix[30];
128 	int ret = 0;
129 
130 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131 
132 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 	case IP_VERSION(9, 0, 0):
134 	case IP_VERSION(11, 0, 7):
135 	case IP_VERSION(11, 0, 9):
136 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 		break;
139 	case IP_VERSION(13, 0, 2):
140 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 		ret = psp_init_cap_microcode(psp, ucode_prefix);
142 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 		break;
144 	case IP_VERSION(13, 0, 0):
145 		adev->virt.autoload_ucode_id = 0;
146 		break;
147 	case IP_VERSION(13, 0, 6):
148 	case IP_VERSION(13, 0, 14):
149 		ret = psp_init_cap_microcode(psp, ucode_prefix);
150 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 		break;
152 	case IP_VERSION(13, 0, 10):
153 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 		ret = psp_init_cap_microcode(psp, ucode_prefix);
155 		break;
156 	default:
157 		return -EINVAL;
158 	}
159 	return ret;
160 }
161 
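/* Bind the IP-version specific PSP callbacks, record whether firmware
 * autoload and boot-time TMR are supported, and request the PSP microcode
 * (the SRIOV path only needs the CAP/TA images).
 */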
162 static int psp_early_init(struct amdgpu_ip_block *ip_block)
163 {
164 	struct amdgpu_device *adev = ip_block->adev;
165 	struct psp_context *psp = &adev->psp;
166 
167 	psp->autoload_supported = true;
168 	psp->boot_time_tmr = true;
169 
170 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
171 	case IP_VERSION(9, 0, 0):
172 		psp_v3_1_set_psp_funcs(psp);
173 		psp->autoload_supported = false;
174 		psp->boot_time_tmr = false;
175 		break;
176 	case IP_VERSION(10, 0, 0):
177 	case IP_VERSION(10, 0, 1):
178 		psp_v10_0_set_psp_funcs(psp);
179 		psp->autoload_supported = false;
180 		psp->boot_time_tmr = false;
181 		break;
182 	case IP_VERSION(11, 0, 2):
183 	case IP_VERSION(11, 0, 4):
184 		psp_v11_0_set_psp_funcs(psp);
185 		psp->autoload_supported = false;
186 		psp->boot_time_tmr = false;
187 		break;
188 	case IP_VERSION(11, 0, 0):
189 	case IP_VERSION(11, 0, 7):
190 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
191 		fallthrough;
192 	case IP_VERSION(11, 0, 5):
193 	case IP_VERSION(11, 0, 9):
194 	case IP_VERSION(11, 0, 11):
195 	case IP_VERSION(11, 5, 0):
196 	case IP_VERSION(11, 0, 12):
197 	case IP_VERSION(11, 0, 13):
198 		psp_v11_0_set_psp_funcs(psp);
199 		psp->boot_time_tmr = false;
200 		break;
201 	case IP_VERSION(11, 0, 3):
202 	case IP_VERSION(12, 0, 1):
203 		psp_v12_0_set_psp_funcs(psp);
204 		psp->autoload_supported = false;
205 		psp->boot_time_tmr = false;
206 		break;
207 	case IP_VERSION(13, 0, 2):
208 		psp->boot_time_tmr = false;
209 		fallthrough;
210 	case IP_VERSION(13, 0, 6):
211 	case IP_VERSION(13, 0, 12):
212 	case IP_VERSION(13, 0, 14):
213 		psp_v13_0_set_psp_funcs(psp);
214 		psp->autoload_supported = false;
215 		break;
216 	case IP_VERSION(13, 0, 1):
217 	case IP_VERSION(13, 0, 3):
218 	case IP_VERSION(13, 0, 5):
219 	case IP_VERSION(13, 0, 8):
220 	case IP_VERSION(13, 0, 11):
221 	case IP_VERSION(14, 0, 0):
222 	case IP_VERSION(14, 0, 1):
223 	case IP_VERSION(14, 0, 4):
224 		psp_v13_0_set_psp_funcs(psp);
225 		psp->boot_time_tmr = false;
226 		break;
227 	case IP_VERSION(11, 0, 8):
228 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
229 			psp_v11_0_8_set_psp_funcs(psp);
231 		psp->autoload_supported = false;
232 		psp->boot_time_tmr = false;
233 		break;
234 	case IP_VERSION(13, 0, 0):
235 	case IP_VERSION(13, 0, 7):
236 	case IP_VERSION(13, 0, 10):
237 		psp_v13_0_set_psp_funcs(psp);
238 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
239 		psp->boot_time_tmr = false;
240 		break;
241 	case IP_VERSION(13, 0, 4):
242 		psp_v13_0_4_set_psp_funcs(psp);
243 		psp->boot_time_tmr = false;
244 		break;
245 	case IP_VERSION(14, 0, 2):
246 	case IP_VERSION(14, 0, 3):
247 		psp_v14_0_set_psp_funcs(psp);
248 		break;
249 	case IP_VERSION(14, 0, 5):
250 		psp_v14_0_set_psp_funcs(psp);
251 		psp->boot_time_tmr = false;
252 		break;
253 	default:
254 		return -EINVAL;
255 	}
256 
257 	psp->adev = adev;
258 
259 	adev->psp_timeout = 20000;
260 
261 	psp_check_pmfw_centralized_cstate_management(psp);
262 
263 	if (amdgpu_sriov_vf(adev))
264 		return psp_init_sriov_microcode(psp);
265 	else
266 		return psp_init_microcode(psp);
267 }
268 
269 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
270 {
271 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
272 			      &mem_ctx->shared_buf);
273 	mem_ctx->shared_bo = NULL;
274 }
275 
276 static void psp_free_shared_bufs(struct psp_context *psp)
277 {
278 	void *tmr_buf;
279 	void **pptr;
280 
281 	/* free TMR memory buffer */
282 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
283 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
284 	psp->tmr_bo = NULL;
285 
286 	/* free xgmi shared memory */
287 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
288 
289 	/* free ras shared memory */
290 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
291 
292 	/* free hdcp shared memory */
293 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
294 
295 	/* free dtm shared memory */
296 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
297 
298 	/* free rap shared memory */
299 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
300 
301 	/* free securedisplay shared memory */
302 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
303 
306 
307 static void psp_memory_training_fini(struct psp_context *psp)
308 {
309 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
310 
311 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
312 	kfree(ctx->sys_cache);
313 	ctx->sys_cache = NULL;
314 }
315 
316 static int psp_memory_training_init(struct psp_context *psp)
317 {
318 	int ret;
319 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
320 
321 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
322 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
323 		return 0;
324 	}
325 
326 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
327 	if (ctx->sys_cache == NULL) {
328 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
329 		ret = -ENOMEM;
330 		goto Err_out;
331 	}
332 
333 	dev_dbg(psp->adev->dev,
334 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
335 		ctx->train_data_size,
336 		ctx->p2c_train_data_offset,
337 		ctx->c2p_train_data_offset);
338 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
339 	return 0;
340 
341 Err_out:
342 	psp_memory_training_fini(psp);
343 	return ret;
344 }
345 
346 /*
347  * Helper function to query a PSP runtime database entry
348  *
349  * @adev: amdgpu_device pointer
350  * @entry_type: the type of psp runtime database entry
351  * @db_entry: runtime database entry pointer
352  *
353  * Return false if the runtime database doesn't exist or the entry is invalid,
354  * or true if the specific database entry is found and copied to @db_entry
355  */
356 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
357 				     enum psp_runtime_entry_type entry_type,
358 				     void *db_entry)
359 {
360 	uint64_t db_header_pos, db_dir_pos;
361 	struct psp_runtime_data_header db_header = {0};
362 	struct psp_runtime_data_directory db_dir = {0};
363 	bool ret = false;
364 	int i;
365 
366 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
367 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
368 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
369 		return false;
370 
371 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
372 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
373 
374 	/* read runtime db header from vram */
375 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
376 			sizeof(struct psp_runtime_data_header), false);
377 
378 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
379 		/* runtime db doesn't exist, exit */
380 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
381 		return false;
382 	}
383 
384 	/* read runtime database entry from vram */
385 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
386 			sizeof(struct psp_runtime_data_directory), false);
387 
388 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
389 		/* invalid db entry count, exit */
390 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
391 		return false;
392 	}
393 
394 	/* look up for requested entry type */
395 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
396 		if (db_dir.entry_list[i].entry_type == entry_type) {
397 			switch (entry_type) {
398 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
399 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
400 					/* invalid db entry size */
401 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
402 					return false;
403 				}
404 				/* read runtime database entry */
405 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
406 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
407 				ret = true;
408 				break;
409 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
410 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
411 					/* invalid db entry size */
412 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
413 					return false;
414 				}
415 				/* read runtime database entry */
416 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
417 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
418 				ret = true;
419 				break;
420 			default:
421 				ret = false;
422 				break;
423 			}
424 		}
425 	}
426 
427 	return ret;
428 }
429 
430 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
431 {
432 	struct amdgpu_device *adev = ip_block->adev;
433 	struct psp_context *psp = &adev->psp;
434 	int ret;
435 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
436 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
437 	struct psp_runtime_scpm_entry scpm_entry;
438 
439 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
440 	if (!psp->cmd) {
441 		dev_err(adev->dev, "Failed to allocate memory for command buffer!\n");
442 		return -ENOMEM;
443 	}
444 
445 	adev->psp.xgmi_context.supports_extended_data =
446 		!adev->gmc.xgmi.connected_to_cpu &&
447 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
448 
449 	memset(&scpm_entry, 0, sizeof(scpm_entry));
450 	if ((psp_get_runtime_db_entry(adev,
451 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
452 				&scpm_entry)) &&
453 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
454 		adev->scpm_enabled = true;
455 		adev->scpm_status = scpm_entry.scpm_status;
456 	} else {
457 		adev->scpm_enabled = false;
458 		adev->scpm_status = SCPM_DISABLE;
459 	}
460 
461 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
462 
463 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
464 	if (psp_get_runtime_db_entry(adev,
465 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
466 				&boot_cfg_entry)) {
467 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
468 		if ((psp->boot_cfg_bitmask) &
469 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
470 			/* If psp runtime database exists, then
471 			 * only enable two stage memory training
472 			 * when TWO_STAGE_DRAM_TRAINING bit is set
473 			 * in runtime database
474 			 */
475 			mem_training_ctx->enable_mem_training = true;
476 		}
477 
478 	} else {
479 		/* If psp runtime database doesn't exist or is
480 		 * invalid, force enable two stage memory training
481 		 */
482 		mem_training_ctx->enable_mem_training = true;
483 	}
484 
485 	if (mem_training_ctx->enable_mem_training) {
486 		ret = psp_memory_training_init(psp);
487 		if (ret) {
488 			dev_err(adev->dev, "Failed to initialize memory training!\n");
489 			return ret;
490 		}
491 
492 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
493 		if (ret) {
494 			dev_err(adev->dev, "Failed to process memory training!\n");
495 			return ret;
496 		}
497 	}
498 
499 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
500 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
501 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
502 				      &psp->fw_pri_bo,
503 				      &psp->fw_pri_mc_addr,
504 				      &psp->fw_pri_buf);
505 	if (ret)
506 		return ret;
507 
508 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
509 				      AMDGPU_GEM_DOMAIN_VRAM |
510 				      AMDGPU_GEM_DOMAIN_GTT,
511 				      &psp->fence_buf_bo,
512 				      &psp->fence_buf_mc_addr,
513 				      &psp->fence_buf);
514 	if (ret)
515 		goto failed1;
516 
517 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
518 				      AMDGPU_GEM_DOMAIN_VRAM |
519 				      AMDGPU_GEM_DOMAIN_GTT,
520 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
521 				      (void **)&psp->cmd_buf_mem);
522 	if (ret)
523 		goto failed2;
524 
525 	return 0;
526 
527 failed2:
528 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
529 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
530 failed1:
531 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
532 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
533 	return ret;
534 }
535 
536 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
537 {
538 	struct amdgpu_device *adev = ip_block->adev;
539 	struct psp_context *psp = &adev->psp;
540 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
541 
542 	psp_memory_training_fini(psp);
543 
544 	amdgpu_ucode_release(&psp->sos_fw);
545 	amdgpu_ucode_release(&psp->asd_fw);
546 	amdgpu_ucode_release(&psp->ta_fw);
547 	amdgpu_ucode_release(&psp->cap_fw);
548 	amdgpu_ucode_release(&psp->toc_fw);
549 
550 	kfree(cmd);
551 	cmd = NULL;
552 
553 	psp_free_shared_bufs(psp);
554 
555 	if (psp->km_ring.ring_mem)
556 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
557 				      &psp->km_ring.ring_mem_mc_addr,
558 				      (void **)&psp->km_ring.ring_mem);
559 
560 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
561 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
562 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
563 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
564 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
565 			      (void **)&psp->cmd_buf_mem);
566 
567 	return 0;
568 }
569 
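/* Poll @reg_index every microsecond until (value & @mask) == @reg_val, or,
 * when @check_changed is set, until the value differs from @reg_val;
 * returns -ETIME after adev->usec_timeout iterations.
 */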
570 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
571 		 uint32_t reg_val, uint32_t mask, bool check_changed)
572 {
573 	uint32_t val;
574 	int i;
575 	struct amdgpu_device *adev = psp->adev;
576 
577 	if (psp->adev->no_hw_access)
578 		return 0;
579 
580 	for (i = 0; i < adev->usec_timeout; i++) {
581 		val = RREG32(reg_index);
582 		if (check_changed) {
583 			if (val != reg_val)
584 				return 0;
585 		} else {
586 			if ((val & mask) == reg_val)
587 				return 0;
588 		}
589 		udelay(1);
590 	}
591 
592 	return -ETIME;
593 }
594 
595 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
596 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
597 {
598 	uint32_t val;
599 	int i;
600 	struct amdgpu_device *adev = psp->adev;
601 
602 	if (psp->adev->no_hw_access)
603 		return 0;
604 
605 	for (i = 0; i < msec_timeout; i++) {
606 		val = RREG32(reg_index);
607 		if ((val & mask) == reg_val)
608 			return 0;
609 		msleep(1);
610 	}
611 
612 	return -ETIME;
613 }
614 
615 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
616 {
617 	switch (cmd_id) {
618 	case GFX_CMD_ID_LOAD_TA:
619 		return "LOAD_TA";
620 	case GFX_CMD_ID_UNLOAD_TA:
621 		return "UNLOAD_TA";
622 	case GFX_CMD_ID_INVOKE_CMD:
623 		return "INVOKE_CMD";
624 	case GFX_CMD_ID_LOAD_ASD:
625 		return "LOAD_ASD";
626 	case GFX_CMD_ID_SETUP_TMR:
627 		return "SETUP_TMR";
628 	case GFX_CMD_ID_LOAD_IP_FW:
629 		return "LOAD_IP_FW";
630 	case GFX_CMD_ID_DESTROY_TMR:
631 		return "DESTROY_TMR";
632 	case GFX_CMD_ID_SAVE_RESTORE:
633 		return "SAVE_RESTORE_IP_FW";
634 	case GFX_CMD_ID_SETUP_VMR:
635 		return "SETUP_VMR";
636 	case GFX_CMD_ID_DESTROY_VMR:
637 		return "DESTROY_VMR";
638 	case GFX_CMD_ID_PROG_REG:
639 		return "PROG_REG";
640 	case GFX_CMD_ID_GET_FW_ATTESTATION:
641 		return "GET_FW_ATTESTATION";
642 	case GFX_CMD_ID_LOAD_TOC:
643 		return "ID_LOAD_TOC";
644 	case GFX_CMD_ID_AUTOLOAD_RLC:
645 		return "AUTOLOAD_RLC";
646 	case GFX_CMD_ID_BOOT_CFG:
647 		return "BOOT_CFG";
648 	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
649 		return "CONFIG_SQ_PERFMON";
650 	default:
651 		return "UNKNOWN CMD";
652 	}
653 }
654 
655 static bool psp_err_warn(struct psp_context *psp)
656 {
657 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
658 
659 	/* This response indicates reg list is already loaded */
660 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
661 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
662 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
663 	    cmd->resp.status == TEE_ERROR_CANCEL)
664 		return false;
665 
666 	return true;
667 }
668 
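/* Copy @cmd into the ring-visible command buffer, submit it with a new
 * fence value, poll the fence buffer until the PSP acknowledges it (or the
 * timeout/RAS interrupt hits), and propagate the response status.
 */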
669 static int
670 psp_cmd_submit_buf(struct psp_context *psp,
671 		   struct amdgpu_firmware_info *ucode,
672 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
673 {
674 	int ret;
675 	int index;
676 	int timeout = psp->adev->psp_timeout;
677 	bool ras_intr = false;
678 	bool skip_unsupport = false;
679 
680 	if (psp->adev->no_hw_access)
681 		return 0;
682 
683 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
684 
685 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
686 
687 	index = atomic_inc_return(&psp->fence_value);
688 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
689 	if (ret) {
690 		atomic_dec(&psp->fence_value);
691 		goto exit;
692 	}
693 
694 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
695 	while (*((unsigned int *)psp->fence_buf) != index) {
696 		if (--timeout == 0)
697 			break;
698 		/*
699 		 * Don't wait for the timeout when err_event_athub occurs, because
700 		 * the GPU reset thread has been triggered and the locked resources
701 		 * should be released for the PSP resume sequence.
702 		 */
703 		ras_intr = amdgpu_ras_intr_triggered();
704 		if (ras_intr)
705 			break;
706 		usleep_range(10, 100);
707 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
708 	}
709 
710 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
711 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
712 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
713 
714 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
715 
716 	/* In some cases, the PSP response status is not 0 even though there is
717 	 * no problem while the command is submitted; some versions of the PSP FW
718 	 * don't write 0 to that field.
719 	 * So here we only print a warning instead of an error during PSP
720 	 * initialization, to avoid breaking hw_init, and we don't return
721 	 * -EINVAL.
722 	 */
723 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
724 		if (ucode)
725 			dev_warn(psp->adev->dev,
726 				 "failed to load ucode %s(0x%X) ",
727 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
728 		if (psp_err_warn(psp))
729 			dev_warn(
730 				psp->adev->dev,
731 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
732 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
733 				psp->cmd_buf_mem->cmd_id,
734 				psp->cmd_buf_mem->resp.status);
735 		/* If any firmware (including CAP) load fails under SRIOV, it should
736 		 * return failure to stop the VF from initializing.
737 		 * Also return failure in case of timeout
738 		 */
739 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
740 			ret = -EINVAL;
741 			goto exit;
742 		}
743 	}
744 
745 	if (ucode) {
746 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
747 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
748 	}
749 
750 exit:
751 	return ret;
752 }
753 
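/* psp->cmd is a single shared scratch command buffer; acquire_psp_cmd_buf()
 * takes psp->mutex and zeroes it, release_psp_cmd_buf() drops the mutex.
 */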
754 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
755 {
756 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
757 
758 	mutex_lock(&psp->mutex);
759 
760 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
761 
762 	return cmd;
763 }
764 
765 static void release_psp_cmd_buf(struct psp_context *psp)
766 {
767 	mutex_unlock(&psp->mutex);
768 }
769 
770 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
771 				 struct psp_gfx_cmd_resp *cmd,
772 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
773 {
774 	struct amdgpu_device *adev = psp->adev;
775 	uint32_t size = 0;
776 	uint64_t tmr_pa = 0;
777 
778 	if (tmr_bo) {
779 		size = amdgpu_bo_size(tmr_bo);
780 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
781 	}
782 
783 	if (amdgpu_sriov_vf(psp->adev))
784 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
785 	else
786 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
787 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
788 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
789 	cmd->cmd.cmd_setup_tmr.buf_size = size;
790 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
791 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
792 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
793 }
794 
795 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
796 				      uint64_t pri_buf_mc, uint32_t size)
797 {
798 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
799 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
800 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
801 	cmd->cmd.cmd_load_toc.toc_size = size;
802 }
803 
804 /* Issue the LOAD TOC cmd to PSP to parse the TOC and calculate the TMR size needed */
805 static int psp_load_toc(struct psp_context *psp,
806 			uint32_t *tmr_size)
807 {
808 	int ret;
809 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
810 
811 	/* Copy toc to psp firmware private buffer */
812 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
813 
814 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
815 
816 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
817 				 psp->fence_buf_mc_addr);
818 	if (!ret)
819 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
820 
821 	release_psp_cmd_buf(psp);
822 
823 	return ret;
824 }
825 
826 /* Set up Trusted Memory Region */
827 static int psp_tmr_init(struct psp_context *psp)
828 {
829 	int ret = 0;
830 	int tmr_size;
831 	void *tmr_buf;
832 	void **pptr;
833 
834 	/*
835 	 * According to the HW engineers, the TMR address should be "naturally
836 	 * aligned", i.e. the start address should be an integer multiple of the
837 	 * TMR size.
838 	 *
839 	 * Note: this memory needs to stay reserved until the driver is unloaded.
840 	 */
841 	tmr_size = PSP_TMR_SIZE(psp->adev);
842 
843 	/* For ASICs that support RLC autoload, PSP will parse the TOC
844 	 * and calculate the total TMR size needed
845 	 */
846 	if (!amdgpu_sriov_vf(psp->adev) &&
847 	    psp->toc.start_addr &&
848 	    psp->toc.size_bytes &&
849 	    psp->fw_pri_buf) {
850 		ret = psp_load_toc(psp, &tmr_size);
851 		if (ret) {
852 			dev_err(psp->adev->dev, "Failed to load toc\n");
853 			return ret;
854 		}
855 	}
856 
857 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
858 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
859 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
860 					      PSP_TMR_ALIGNMENT,
861 					      AMDGPU_HAS_VRAM(psp->adev) ?
862 					      AMDGPU_GEM_DOMAIN_VRAM :
863 					      AMDGPU_GEM_DOMAIN_GTT,
864 					      &psp->tmr_bo, &psp->tmr_mc_addr,
865 					      pptr);
866 	}
867 
868 	return ret;
869 }
870 
871 static bool psp_skip_tmr(struct psp_context *psp)
872 {
873 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
874 	case IP_VERSION(11, 0, 9):
875 	case IP_VERSION(11, 0, 7):
876 	case IP_VERSION(13, 0, 2):
877 	case IP_VERSION(13, 0, 6):
878 	case IP_VERSION(13, 0, 10):
879 	case IP_VERSION(13, 0, 12):
880 	case IP_VERSION(13, 0, 14):
881 		return true;
882 	default:
883 		return false;
884 	}
885 }
886 
887 static int psp_tmr_load(struct psp_context *psp)
888 {
889 	int ret;
890 	struct psp_gfx_cmd_resp *cmd;
891 
892 	/* Under SRIOV, do not set up the TMR on ASICs covered by psp_skip_tmr();
893 	 * it is already set up by the host driver.
894 	 */
895 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
896 		return 0;
897 
898 	cmd = acquire_psp_cmd_buf(psp);
899 
900 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
901 	if (psp->tmr_bo)
902 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
903 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
904 
905 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
906 				 psp->fence_buf_mc_addr);
907 
908 	release_psp_cmd_buf(psp);
909 
910 	return ret;
911 }
912 
913 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
914 					struct psp_gfx_cmd_resp *cmd)
915 {
916 	if (amdgpu_sriov_vf(psp->adev))
917 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
918 	else
919 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
920 }
921 
922 static int psp_tmr_unload(struct psp_context *psp)
923 {
924 	int ret;
925 	struct psp_gfx_cmd_resp *cmd;
926 
927 	/* Under SRIOV, skip TMR unload on ASICs covered by psp_skip_tmr(),
928 	 * as the TMR was never loaded by the guest
929 	 */
930 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
931 		return 0;
932 
933 	cmd = acquire_psp_cmd_buf(psp);
934 
935 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
936 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
937 
938 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
939 				 psp->fence_buf_mc_addr);
940 
941 	release_psp_cmd_buf(psp);
942 
943 	return ret;
944 }
945 
946 static int psp_tmr_terminate(struct psp_context *psp)
947 {
948 	return psp_tmr_unload(psp);
949 }
950 
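/* Query the address of the firmware attestation records database from the
 * PSP and return it through @output_ptr (no-op under SRIOV).
 */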
951 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
952 					uint64_t *output_ptr)
953 {
954 	int ret;
955 	struct psp_gfx_cmd_resp *cmd;
956 
957 	if (!output_ptr)
958 		return -EINVAL;
959 
960 	if (amdgpu_sriov_vf(psp->adev))
961 		return 0;
962 
963 	cmd = acquire_psp_cmd_buf(psp);
964 
965 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
966 
967 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
968 				 psp->fence_buf_mc_addr);
969 
970 	if (!ret) {
971 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
972 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
973 	}
974 
975 	release_psp_cmd_buf(psp);
976 
977 	return ret;
978 }
979 
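/* Read the PSP boot config and report through @boot_cfg whether the GECC
 * bit is set (1) or clear (0); skipped under SRIOV.
 */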
980 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
981 {
982 	struct psp_context *psp = &adev->psp;
983 	struct psp_gfx_cmd_resp *cmd;
984 	int ret;
985 
986 	if (amdgpu_sriov_vf(adev))
987 		return 0;
988 
989 	cmd = acquire_psp_cmd_buf(psp);
990 
991 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
992 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
993 
994 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
995 	if (!ret) {
996 		*boot_cfg =
997 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
998 	}
999 
1000 	release_psp_cmd_buf(psp);
1001 
1002 	return ret;
1003 }
1004 
1005 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1006 {
1007 	int ret;
1008 	struct psp_context *psp = &adev->psp;
1009 	struct psp_gfx_cmd_resp *cmd;
1010 
1011 	if (amdgpu_sriov_vf(adev))
1012 		return 0;
1013 
1014 	cmd = acquire_psp_cmd_buf(psp);
1015 
1016 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1017 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1018 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1019 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1020 
1021 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1022 
1023 	release_psp_cmd_buf(psp);
1024 
1025 	return ret;
1026 }
1027 
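/* If a register list (RL) image is present, copy it into the PSP private
 * firmware buffer and load it as GFX_FW_TYPE_REG_LIST.
 */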
1028 static int psp_rl_load(struct amdgpu_device *adev)
1029 {
1030 	int ret;
1031 	struct psp_context *psp = &adev->psp;
1032 	struct psp_gfx_cmd_resp *cmd;
1033 
1034 	if (!is_psp_fw_valid(psp->rl))
1035 		return 0;
1036 
1037 	cmd = acquire_psp_cmd_buf(psp);
1038 
1039 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1040 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1041 
1042 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1043 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1044 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1045 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1046 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1047 
1048 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1049 
1050 	release_psp_cmd_buf(psp);
1051 
1052 	return ret;
1053 }
1054 
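/* Ask the PSP to switch the framebuffer to the requested NPS memory
 * partition mode (GFX_CMD_ID_FB_NPS_MODE); skipped under SRIOV.
 */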
1055 int psp_memory_partition(struct psp_context *psp, int mode)
1056 {
1057 	struct psp_gfx_cmd_resp *cmd;
1058 	int ret;
1059 
1060 	if (amdgpu_sriov_vf(psp->adev))
1061 		return 0;
1062 
1063 	cmd = acquire_psp_cmd_buf(psp);
1064 
1065 	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1066 	cmd->cmd.cmd_memory_part.mode = mode;
1067 
1068 	dev_info(psp->adev->dev,
1069 		 "Requesting %d memory partition change through PSP", mode);
1070 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1071 	if (ret)
1072 		dev_err(psp->adev->dev,
1073 			"PSP request failed to change to NPS%d mode\n", mode);
1074 
1075 	release_psp_cmd_buf(psp);
1076 
1077 	return ret;
1078 }
1079 
1080 int psp_spatial_partition(struct psp_context *psp, int mode)
1081 {
1082 	struct psp_gfx_cmd_resp *cmd;
1083 	int ret;
1084 
1085 	if (amdgpu_sriov_vf(psp->adev))
1086 		return 0;
1087 
1088 	cmd = acquire_psp_cmd_buf(psp);
1089 
1090 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1091 	cmd->cmd.cmd_spatial_part.mode = mode;
1092 
1093 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP\n", mode);
1094 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1095 
1096 	release_psp_cmd_buf(psp);
1097 
1098 	return ret;
1099 }
1100 
1101 static int psp_asd_initialize(struct psp_context *psp)
1102 {
1103 	int ret;
1104 
1105 	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
1106 	 * Add a workaround to bypass it for SRIOV for now.
1107 	 * TODO: add version check to make it common
1108 	 */
1109 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1110 		return 0;
1111 
1112 	/* bypass asd if display hardware is not available */
1113 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1114 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1115 		return 0;
1116 
1117 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1118 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1119 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1120 
1121 	ret = psp_ta_load(psp, &psp->asd_context);
1122 	if (!ret)
1123 		psp->asd_context.initialized = true;
1124 
1125 	return ret;
1126 }
1127 
1128 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1129 				       uint32_t session_id)
1130 {
1131 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1132 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1133 }
1134 
1135 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1136 {
1137 	int ret;
1138 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1139 
1140 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1141 
1142 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1143 
1144 	context->resp_status = cmd->resp.status;
1145 
1146 	release_psp_cmd_buf(psp);
1147 
1148 	return ret;
1149 }
1150 
1151 static int psp_asd_terminate(struct psp_context *psp)
1152 {
1153 	int ret;
1154 
1155 	if (amdgpu_sriov_vf(psp->adev))
1156 		return 0;
1157 
1158 	if (!psp->asd_context.initialized)
1159 		return 0;
1160 
1161 	ret = psp_ta_unload(psp, &psp->asd_context);
1162 	if (!ret)
1163 		psp->asd_context.initialized = false;
1164 
1165 	return ret;
1166 }
1167 
1168 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1169 		uint32_t id, uint32_t value)
1170 {
1171 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1172 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1173 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1174 }
1175 
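/* Program a PSP-owned register (one of enum psp_reg_prog_id) to @value via
 * GFX_CMD_ID_PROG_REG.
 */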
1176 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1177 		uint32_t value)
1178 {
1179 	struct psp_gfx_cmd_resp *cmd;
1180 	int ret = 0;
1181 
1182 	if (reg >= PSP_REG_LAST)
1183 		return -EINVAL;
1184 
1185 	cmd = acquire_psp_cmd_buf(psp);
1186 
1187 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1188 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1189 	if (ret)
1190 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1191 
1192 	release_psp_cmd_buf(psp);
1193 
1194 	return ret;
1195 }
1196 
1197 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1198 				     uint64_t ta_bin_mc,
1199 				     struct ta_context *context)
1200 {
1201 	cmd->cmd_id				= context->ta_load_type;
1202 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1203 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1204 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1205 
1206 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1207 		lower_32_bits(context->mem_context.shared_mc_addr);
1208 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1209 		upper_32_bits(context->mem_context.shared_mc_addr);
1210 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1211 }
1212 
1213 int psp_ta_init_shared_buf(struct psp_context *psp,
1214 				  struct ta_mem_context *mem_ctx)
1215 {
1216 	/*
1217 	 * Allocate 16K of memory, aligned to 4K, from the frame buffer (local
1218 	 * physical memory) as the TA-to-host shared memory
1219 	 */
1220 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1221 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1222 				      AMDGPU_GEM_DOMAIN_GTT,
1223 				      &mem_ctx->shared_bo,
1224 				      &mem_ctx->shared_mc_addr,
1225 				      &mem_ctx->shared_buf);
1226 }
1227 
1228 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1229 				       uint32_t ta_cmd_id,
1230 				       uint32_t session_id)
1231 {
1232 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1233 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1234 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1235 }
1236 
1237 int psp_ta_invoke(struct psp_context *psp,
1238 		  uint32_t ta_cmd_id,
1239 		  struct ta_context *context)
1240 {
1241 	int ret;
1242 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1243 
1244 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1245 
1246 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1247 				 psp->fence_buf_mc_addr);
1248 
1249 	context->resp_status = cmd->resp.status;
1250 
1251 	release_psp_cmd_buf(psp);
1252 
1253 	return ret;
1254 }
1255 
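/* Copy the TA binary into the PSP private firmware buffer, issue the load
 * command described by @context, and record the session ID on success.
 */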
1256 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1257 {
1258 	int ret;
1259 	struct psp_gfx_cmd_resp *cmd;
1260 
1261 	cmd = acquire_psp_cmd_buf(psp);
1262 
1263 	psp_copy_fw(psp, context->bin_desc.start_addr,
1264 		    context->bin_desc.size_bytes);
1265 
1266 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1267 
1268 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1269 				 psp->fence_buf_mc_addr);
1270 
1271 	context->resp_status = cmd->resp.status;
1272 
1273 	if (!ret)
1274 		context->session_id = cmd->resp.session_id;
1275 
1276 	release_psp_cmd_buf(psp);
1277 
1278 	return ret;
1279 }
1280 
1281 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1282 {
1283 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1284 }
1285 
1286 int psp_xgmi_terminate(struct psp_context *psp)
1287 {
1288 	int ret;
1289 	struct amdgpu_device *adev = psp->adev;
1290 
1291 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1292 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1293 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1294 	     adev->gmc.xgmi.connected_to_cpu))
1295 		return 0;
1296 
1297 	if (!psp->xgmi_context.context.initialized)
1298 		return 0;
1299 
1300 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1301 
1302 	psp->xgmi_context.context.initialized = false;
1303 
1304 	return ret;
1305 }
1306 
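/* Optionally load the XGMI TA (allocating its shared buffer on first use),
 * then issue TA_COMMAND_XGMI__INITIALIZE and cache the TA capability flags.
 */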
1307 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1308 {
1309 	struct ta_xgmi_shared_memory *xgmi_cmd;
1310 	int ret;
1311 
1312 	if (!psp->ta_fw ||
1313 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1314 	    !psp->xgmi_context.context.bin_desc.start_addr)
1315 		return -ENOENT;
1316 
1317 	if (!load_ta)
1318 		goto invoke;
1319 
1320 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1321 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1322 
1323 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1324 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1325 		if (ret)
1326 			return ret;
1327 	}
1328 
1329 	/* Load XGMI TA */
1330 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1331 	if (!ret)
1332 		psp->xgmi_context.context.initialized = true;
1333 	else
1334 		return ret;
1335 
1336 invoke:
1337 	/* Initialize XGMI session */
1338 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1339 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1340 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1341 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1342 
1343 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1344 	/* note down the capability flag for the XGMI TA */
1345 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1346 
1347 	return ret;
1348 }
1349 
1350 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1351 {
1352 	struct ta_xgmi_shared_memory *xgmi_cmd;
1353 	int ret;
1354 
1355 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1356 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1357 
1358 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1359 
1360 	/* Invoke xgmi ta to get hive id */
1361 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1362 	if (ret)
1363 		return ret;
1364 
1365 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1366 
1367 	return 0;
1368 }
1369 
1370 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1371 {
1372 	struct ta_xgmi_shared_memory *xgmi_cmd;
1373 	int ret;
1374 
1375 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1376 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1377 
1378 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1379 
1380 	/* Invoke xgmi ta to get the node id */
1381 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1382 	if (ret)
1383 		return ret;
1384 
1385 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1386 
1387 	return 0;
1388 }
1389 
1390 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1391 {
1392 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1393 			IP_VERSION(13, 0, 2) &&
1394 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1395 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1396 		       IP_VERSION(13, 0, 6);
1397 }
1398 
1399 /*
1400  * Chips that support extended topology information require the driver to
1401  * reflect topology information in the opposite direction.  This is
1402  * because the TA has already exceeded its link record limit and if the
1403  * TA holds bi-directional information, the driver would have to do
1404  * multiple fetches instead of just two.
1405  */
1406 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1407 					struct psp_xgmi_node_info node_info)
1408 {
1409 	struct amdgpu_device *mirror_adev;
1410 	struct amdgpu_hive_info *hive;
1411 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1412 	uint64_t dst_node_id = node_info.node_id;
1413 	uint8_t dst_num_hops = node_info.num_hops;
1414 	uint8_t dst_num_links = node_info.num_links;
1415 
1416 	hive = amdgpu_get_xgmi_hive(psp->adev);
1417 	if (WARN_ON(!hive))
1418 		return;
1419 
1420 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1421 		struct psp_xgmi_topology_info *mirror_top_info;
1422 		int j;
1423 
1424 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1425 			continue;
1426 
1427 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1428 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1429 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1430 				continue;
1431 
1432 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1433 			/*
1434 			 * prevent re-reflection of a 0 num_links value, since the
1435 			 * reflection criteria are based on num_hops (direct or
1436 			 * indirect).
1437 			 */
1438 			if (dst_num_links)
1439 				mirror_top_info->nodes[j].num_links = dst_num_links;
1440 
1441 			break;
1442 		}
1443 
1444 		break;
1445 	}
1446 
1447 	amdgpu_put_xgmi_hive(hive);
1448 }
1449 
1450 int psp_xgmi_get_topology_info(struct psp_context *psp,
1451 			       int number_devices,
1452 			       struct psp_xgmi_topology_info *topology,
1453 			       bool get_extended_data)
1454 {
1455 	struct ta_xgmi_shared_memory *xgmi_cmd;
1456 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1457 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1458 	int i;
1459 	int ret;
1460 
1461 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1462 		return -EINVAL;
1463 
1464 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1465 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1466 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1467 
1468 	/* Fill in the shared memory with topology information as input */
1469 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1470 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1471 	topology_info_input->num_nodes = number_devices;
1472 
1473 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1474 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1475 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1476 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1477 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1478 	}
1479 
1480 	/* Invoke xgmi ta to get the topology information */
1481 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1482 	if (ret)
1483 		return ret;
1484 
1485 	/* Read the output topology information from the shared memory */
1486 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1487 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1488 	for (i = 0; i < topology->num_nodes; i++) {
1489 		/* extended data will either be 0 or equal to non-extended data */
1490 		if (topology_info_output->nodes[i].num_hops)
1491 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1492 
1493 		/* non-extended data gets everything here so no need to update */
1494 		if (!get_extended_data) {
1495 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1496 			topology->nodes[i].is_sharing_enabled =
1497 					topology_info_output->nodes[i].is_sharing_enabled;
1498 			topology->nodes[i].sdma_engine =
1499 					topology_info_output->nodes[i].sdma_engine;
1500 		}
1501 
1502 	}
1503 
1504 	/* Invoke xgmi ta again to get the link information */
1505 	if (psp_xgmi_peer_link_info_supported(psp)) {
1506 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1507 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1508 		bool requires_reflection =
1509 			(psp->xgmi_context.supports_extended_data &&
1510 			 get_extended_data) ||
1511 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1512 				IP_VERSION(13, 0, 6) ||
1513 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1514 				IP_VERSION(13, 0, 14);
1515 		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1516 				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1517 
1518 		/* populate the shared output buffer rather than the cmd input buffer
1519 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1520 		 * This is required by the xgmi ta implementation of GET_PEER_LINKS;
1521 		 * the same requirement applies to the GET_EXTEND_PEER_LINKS command.
1522 		 */
1523 		if (ta_port_num_support) {
1524 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1525 
1526 			for (i = 0; i < topology->num_nodes; i++)
1527 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1528 
1529 			link_extend_info_output->num_nodes = topology->num_nodes;
1530 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1531 		} else {
1532 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1533 
1534 			for (i = 0; i < topology->num_nodes; i++)
1535 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1536 
1537 			link_info_output->num_nodes = topology->num_nodes;
1538 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1539 		}
1540 
1541 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1542 		if (ret)
1543 			return ret;
1544 
1545 		for (i = 0; i < topology->num_nodes; i++) {
1546 			uint8_t node_num_links = ta_port_num_support ?
1547 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1548 			/* accumulate num_links on extended data */
1549 			if (get_extended_data) {
1550 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1551 			} else {
1552 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1553 								topology->nodes[i].num_links : node_num_links;
1554 			}
1555 			/* populate the connected port num info if supported and available */
1556 			if (ta_port_num_support && topology->nodes[i].num_links) {
1557 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1558 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1559 			}
1560 
1561 			/* reflect the topology information for bi-directionality */
1562 			if (requires_reflection && topology->nodes[i].num_hops)
1563 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1564 		}
1565 	}
1566 
1567 	return 0;
1568 }
1569 
1570 int psp_xgmi_set_topology_info(struct psp_context *psp,
1571 			       int number_devices,
1572 			       struct psp_xgmi_topology_info *topology)
1573 {
1574 	struct ta_xgmi_shared_memory *xgmi_cmd;
1575 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1576 	int i;
1577 
1578 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1579 		return -EINVAL;
1580 
1581 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1582 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1583 
1584 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1585 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1586 	topology_info_input->num_nodes = number_devices;
1587 
1588 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1589 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1590 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1591 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1592 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1593 	}
1594 
1595 	/* Invoke xgmi ta to set topology information */
1596 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1597 }
1598 
1599 // ras begin
1600 static void psp_ras_ta_check_status(struct psp_context *psp)
1601 {
1602 	struct ta_ras_shared_memory *ras_cmd =
1603 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1604 
1605 	switch (ras_cmd->ras_status) {
1606 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1607 		dev_warn(psp->adev->dev,
1608 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1609 		break;
1610 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1611 		dev_warn(psp->adev->dev,
1612 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1613 		break;
1614 	case TA_RAS_STATUS__SUCCESS:
1615 		break;
1616 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1617 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1618 			dev_warn(psp->adev->dev,
1619 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1620 		break;
1621 	default:
1622 		dev_warn(psp->adev->dev,
1623 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1624 		break;
1625 	}
1626 }
1627 
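/* Marshal the command-specific input into the RAS TA shared buffer under
 * ras_context.mutex, invoke the TA, and copy back the output for
 * TRIGGER_ERROR and QUERY_ADDRESS commands.
 */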
1628 static int psp_ras_send_cmd(struct psp_context *psp,
1629 		enum ras_command cmd_id, void *in, void *out)
1630 {
1631 	struct ta_ras_shared_memory *ras_cmd;
1632 	uint32_t cmd = cmd_id;
1633 	int ret = 0;
1634 
1635 	if (!in)
1636 		return -EINVAL;
1637 
1638 	mutex_lock(&psp->ras_context.mutex);
1639 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1640 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1641 
1642 	switch (cmd) {
1643 	case TA_RAS_COMMAND__ENABLE_FEATURES:
1644 	case TA_RAS_COMMAND__DISABLE_FEATURES:
1645 		memcpy(&ras_cmd->ras_in_message,
1646 			in, sizeof(ras_cmd->ras_in_message));
1647 		break;
1648 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1649 		memcpy(&ras_cmd->ras_in_message.trigger_error,
1650 			in, sizeof(ras_cmd->ras_in_message.trigger_error));
1651 		break;
1652 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1653 		memcpy(&ras_cmd->ras_in_message.address,
1654 			in, sizeof(ras_cmd->ras_in_message.address));
1655 		break;
1656 	default:
1657 		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1658 		ret = -EINVAL;
1659 		goto err_out;
1660 	}
1661 
1662 	ras_cmd->cmd_id = cmd;
1663 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1664 
1665 	switch (cmd) {
1666 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1667 		if (!ret && out)
1668 			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1669 		break;
1670 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1671 		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1672 			ret = -EINVAL;
1673 		else if (out)
1674 			memcpy(out,
1675 				&ras_cmd->ras_out_message.address,
1676 				sizeof(ras_cmd->ras_out_message.address));
1677 		break;
1678 	default:
1679 		break;
1680 	}
1681 
1682 err_out:
1683 	mutex_unlock(&psp->ras_context.mutex);
1684 
1685 	return ret;
1686 }
1687 
1688 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1689 {
1690 	struct ta_ras_shared_memory *ras_cmd;
1691 	int ret;
1692 
1693 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1694 
1695 	/*
1696 	 * TODO: bypass the loading in sriov for now
1697 	 */
1698 	if (amdgpu_sriov_vf(psp->adev))
1699 		return 0;
1700 
1701 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1702 
1703 	if (amdgpu_ras_intr_triggered())
1704 		return ret;
1705 
1706 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1707 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1708 		return -EINVAL;
1709 	}
1710 
1711 	if (!ret) {
1712 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1713 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1714 
1715 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1716 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1717 			dev_warn(psp->adev->dev,
1718 				 "RAS internal register access blocked\n");
1719 
1720 		psp_ras_ta_check_status(psp);
1721 	}
1722 
1723 	return ret;
1724 }
1725 
1726 int psp_ras_enable_features(struct psp_context *psp,
1727 		union ta_ras_cmd_input *info, bool enable)
1728 {
1729 	enum ras_command cmd_id;
1730 	int ret;
1731 
1732 	if (!psp->ras_context.context.initialized || !info)
1733 		return -EINVAL;
1734 
1735 	cmd_id = enable ?
1736 		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1737 	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1738 	if (ret)
1739 		return -EINVAL;
1740 
1741 	return 0;
1742 }
1743 
1744 int psp_ras_terminate(struct psp_context *psp)
1745 {
1746 	int ret;
1747 
1748 	/*
1749 	 * TODO: bypass the terminate in sriov for now
1750 	 */
1751 	if (amdgpu_sriov_vf(psp->adev))
1752 		return 0;
1753 
1754 	if (!psp->ras_context.context.initialized)
1755 		return 0;
1756 
1757 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1758 
1759 	psp->ras_context.context.initialized = false;
1760 
1761 	mutex_destroy(&psp->ras_context.mutex);
1762 
1763 	return ret;
1764 }
1765 
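/*
 * Load the RAS TA.  When dynamic boot config is supported, GECC is first
 * reconciled with the RAS settings: it is scheduled to be disabled on the
 * next boot cycle if UMC RAS is not supported, or enabled (BOOT_CONFIG_GECC)
 * otherwise.  The TA init flags (poison mode, dGPU mode, XCC mask, disabled
 * channel count, NPS mode) are then filled in before psp_ta_load() is issued.
 */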
1766 int psp_ras_initialize(struct psp_context *psp)
1767 {
1768 	int ret;
1769 	uint32_t boot_cfg = 0xFF;
1770 	struct amdgpu_device *adev = psp->adev;
1771 	struct ta_ras_shared_memory *ras_cmd;
1772 
1773 	/*
1774 	 * TODO: bypass the initialize in sriov for now
1775 	 */
1776 	if (amdgpu_sriov_vf(adev))
1777 		return 0;
1778 
1779 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1780 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1781 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1782 		return 0;
1783 	}
1784 
1785 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1786 		/* query the GECC enablement status from the boot config;
1787 		 * boot_cfg: 1 = GECC is enabled, 0 = GECC is disabled
1788 		 */
1789 		ret = psp_boot_config_get(adev, &boot_cfg);
1790 		if (ret)
1791 			dev_warn(adev->dev, "PSP get boot config failed\n");
1792 
1793 		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1794 			if (!boot_cfg) {
1795 				dev_info(adev->dev, "GECC is disabled\n");
1796 			} else {
1797 				/* disable GECC in the next boot cycle if RAS is
1798 				 * disabled by the module parameters amdgpu_ras_enable
1799 				 * and/or amdgpu_ras_mask, or if the boot_config_get
1800 				 * call failed
1801 				 */
1802 				ret = psp_boot_config_set(adev, 0);
1803 				if (ret)
1804 					dev_warn(adev->dev, "PSP set boot config failed\n");
1805 				else
1806 					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1807 			}
1808 		} else {
1809 			if (boot_cfg == 1) {
1810 				dev_info(adev->dev, "GECC is enabled\n");
1811 			} else {
1812 				/* enable GECC in the next boot cycle if it is disabled
1813 				 * in the boot config, or force-enable GECC if the boot
1814 				 * configuration could not be retrieved
1815 				 */
1816 				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1817 				if (ret)
1818 					dev_warn(adev->dev, "PSP set boot config failed\n");
1819 				else
1820 					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1821 			}
1822 		}
1823 	}
1824 
1825 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1826 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1827 
1828 	if (!psp->ras_context.context.mem_context.shared_buf) {
1829 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1830 		if (ret)
1831 			return ret;
1832 	}
1833 
1834 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1835 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1836 
1837 	if (amdgpu_ras_is_poison_mode_supported(adev))
1838 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1839 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1840 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1841 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1842 		adev->gfx.xcc_mask;
1843 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1844 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1845 		ras_cmd->ras_in_message.init_flags.nps_mode =
1846 			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1847 
1848 	ret = psp_ta_load(psp, &psp->ras_context.context);
1849 
1850 	if (!ret && !ras_cmd->ras_status) {
1851 		psp->ras_context.context.initialized = true;
1852 		mutex_init(&psp->ras_context.mutex);
1853 	} else {
1854 		if (ras_cmd->ras_status)
1855 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1856 
1857 		/* failed to load the RAS TA */
1858 		psp->ras_context.context.initialized = false;
1859 	}
1860 
1861 	return ret;
1862 }
1863 
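/*
 * Inject an error through the RAS TA.  The caller's instance mask is first
 * mapped to a device mask for the affected block (GC, SDMA or VCN/JPEG) and
 * packed into the upper bits of sub_block_index via AMDGPU_RAS_INST_SHIFT/
 * AMDGPU_RAS_INST_MASK to stay compatible with older TA interfaces.
 */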
1864 int psp_ras_trigger_error(struct psp_context *psp,
1865 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1866 {
1867 	struct amdgpu_device *adev = psp->adev;
1868 	int ret;
1869 	uint32_t dev_mask;
1870 	uint32_t ras_status = 0;
1871 
1872 	if (!psp->ras_context.context.initialized || !info)
1873 		return -EINVAL;
1874 
1875 	switch (info->block_id) {
1876 	case TA_RAS_BLOCK__GFX:
1877 		dev_mask = GET_MASK(GC, instance_mask);
1878 		break;
1879 	case TA_RAS_BLOCK__SDMA:
1880 		dev_mask = GET_MASK(SDMA0, instance_mask);
1881 		break;
1882 	case TA_RAS_BLOCK__VCN:
1883 	case TA_RAS_BLOCK__JPEG:
1884 		dev_mask = GET_MASK(VCN, instance_mask);
1885 		break;
1886 	default:
1887 		dev_mask = instance_mask;
1888 		break;
1889 	}
1890 
1891 	/* reuse sub_block_index for backward compatibility */
1892 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1893 	dev_mask &= AMDGPU_RAS_INST_MASK;
1894 	info->sub_block_index |= dev_mask;
1895 
1896 	ret = psp_ras_send_cmd(psp,
1897 			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
1898 	if (ret)
1899 		return -EINVAL;
1900 
1901 	/* If err_event_athub occurs, the error injection was successful;
1902 	 * however, the return status from the TA is no longer reliable.
1903 	 */
1904 	if (amdgpu_ras_intr_triggered())
1905 		return 0;
1906 
1907 	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1908 		return -EACCES;
1909 	else if (ras_status)
1910 		return -EINVAL;
1911 
1912 	return 0;
1913 }
1914 
1915 int psp_ras_query_address(struct psp_context *psp,
1916 			  struct ta_ras_query_address_input *addr_in,
1917 			  struct ta_ras_query_address_output *addr_out)
1918 {
1919 	int ret;
1920 
1921 	if (!psp->ras_context.context.initialized ||
1922 		!addr_in || !addr_out)
1923 		return -EINVAL;
1924 
1925 	ret = psp_ras_send_cmd(psp,
1926 			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
1927 
1928 	return ret;
1929 }
1930 // ras end
1931 
1932 // HDCP start
1933 static int psp_hdcp_initialize(struct psp_context *psp)
1934 {
1935 	int ret;
1936 
1937 	/*
1938 	 * TODO: bypass the initialize in sriov for now
1939 	 */
1940 	if (amdgpu_sriov_vf(psp->adev))
1941 		return 0;
1942 
1943 	/* bypass hdcp initialization if dmu is harvested */
1944 	if (!amdgpu_device_has_display_hardware(psp->adev))
1945 		return 0;
1946 
1947 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1948 	    !psp->hdcp_context.context.bin_desc.start_addr) {
1949 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1950 		return 0;
1951 	}
1952 
1953 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1954 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1955 
1956 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
1957 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1958 		if (ret)
1959 			return ret;
1960 	}
1961 
1962 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
1963 	if (!ret) {
1964 		psp->hdcp_context.context.initialized = true;
1965 		mutex_init(&psp->hdcp_context.mutex);
1966 	}
1967 
1968 	return ret;
1969 }
1970 
1971 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1972 {
1973 	/*
1974 	 * TODO: bypass the loading in sriov for now
1975 	 */
1976 	if (amdgpu_sriov_vf(psp->adev))
1977 		return 0;
1978 
1979 	if (!psp->hdcp_context.context.initialized)
1980 		return 0;
1981 
1982 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1983 }
1984 
1985 static int psp_hdcp_terminate(struct psp_context *psp)
1986 {
1987 	int ret;
1988 
1989 	/*
1990 	 * TODO: bypass the terminate in sriov for now
1991 	 */
1992 	if (amdgpu_sriov_vf(psp->adev))
1993 		return 0;
1994 
1995 	if (!psp->hdcp_context.context.initialized)
1996 		return 0;
1997 
1998 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1999 
2000 	psp->hdcp_context.context.initialized = false;
2001 
2002 	return ret;
2003 }
2004 // HDCP end
2005 
2006 // DTM start
2007 static int psp_dtm_initialize(struct psp_context *psp)
2008 {
2009 	int ret;
2010 
2011 	/*
2012 	 * TODO: bypass the initialize in sriov for now
2013 	 */
2014 	if (amdgpu_sriov_vf(psp->adev))
2015 		return 0;
2016 
2017 	/* bypass dtm initialization if dmu is harvested */
2018 	if (!amdgpu_device_has_display_hardware(psp->adev))
2019 		return 0;
2020 
2021 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
2022 	    !psp->dtm_context.context.bin_desc.start_addr) {
2023 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2024 		return 0;
2025 	}
2026 
2027 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2028 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2029 
2030 	if (!psp->dtm_context.context.mem_context.shared_buf) {
2031 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2032 		if (ret)
2033 			return ret;
2034 	}
2035 
2036 	ret = psp_ta_load(psp, &psp->dtm_context.context);
2037 	if (!ret) {
2038 		psp->dtm_context.context.initialized = true;
2039 		mutex_init(&psp->dtm_context.mutex);
2040 	}
2041 
2042 	return ret;
2043 }
2044 
2045 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2046 {
2047 	/*
2048 	 * TODO: bypass the loading in sriov for now
2049 	 */
2050 	if (amdgpu_sriov_vf(psp->adev))
2051 		return 0;
2052 
2053 	if (!psp->dtm_context.context.initialized)
2054 		return 0;
2055 
2056 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2057 }
2058 
2059 static int psp_dtm_terminate(struct psp_context *psp)
2060 {
2061 	int ret;
2062 
2063 	/*
2064 	 * TODO: bypass the terminate in sriov for now
2065 	 */
2066 	if (amdgpu_sriov_vf(psp->adev))
2067 		return 0;
2068 
2069 	if (!psp->dtm_context.context.initialized)
2070 		return 0;
2071 
2072 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
2073 
2074 	psp->dtm_context.context.initialized = false;
2075 
2076 	return ret;
2077 }
2078 // DTM end
2079 
2080 // RAP start
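/*
 * Load the RAP TA and immediately run TA_CMD_RAP__INITIALIZE on it.  If the
 * initialize command fails or reports a non-success status, the TA is
 * unloaded again and its shared buffer is freed before the error is
 * returned to the caller.
 */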
2081 static int psp_rap_initialize(struct psp_context *psp)
2082 {
2083 	int ret;
2084 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2085 
2086 	/*
2087 	 * TODO: bypass the initialize in sriov for now
2088 	 */
2089 	if (amdgpu_sriov_vf(psp->adev))
2090 		return 0;
2091 
2092 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2093 	    !psp->rap_context.context.bin_desc.start_addr) {
2094 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2095 		return 0;
2096 	}
2097 
2098 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2099 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2100 
2101 	if (!psp->rap_context.context.mem_context.shared_buf) {
2102 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2103 		if (ret)
2104 			return ret;
2105 	}
2106 
2107 	ret = psp_ta_load(psp, &psp->rap_context.context);
2108 	if (!ret) {
2109 		psp->rap_context.context.initialized = true;
2110 		mutex_init(&psp->rap_context.mutex);
2111 	} else
2112 		return ret;
2113 
2114 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2115 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2116 		psp_rap_terminate(psp);
2117 		/* free rap shared memory */
2118 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2119 
2120 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2121 			 ret, status);
2122 
2123 		return ret;
2124 	}
2125 
2126 	return 0;
2127 }
2128 
2129 static int psp_rap_terminate(struct psp_context *psp)
2130 {
2131 	int ret;
2132 
2133 	if (!psp->rap_context.context.initialized)
2134 		return 0;
2135 
2136 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2137 
2138 	psp->rap_context.context.initialized = false;
2139 
2140 	return ret;
2141 }
2142 
2143 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2144 {
2145 	struct ta_rap_shared_memory *rap_cmd;
2146 	int ret = 0;
2147 
2148 	if (!psp->rap_context.context.initialized)
2149 		return 0;
2150 
2151 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2152 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2153 		return -EINVAL;
2154 
2155 	mutex_lock(&psp->rap_context.mutex);
2156 
2157 	rap_cmd = (struct ta_rap_shared_memory *)
2158 		  psp->rap_context.context.mem_context.shared_buf;
2159 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2160 
2161 	rap_cmd->cmd_id = ta_cmd_id;
2162 	rap_cmd->validation_method_id = METHOD_A;
2163 
2164 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2165 	if (ret)
2166 		goto out_unlock;
2167 
2168 	if (status)
2169 		*status = rap_cmd->rap_status;
2170 
2171 out_unlock:
2172 	mutex_unlock(&psp->rap_context.mutex);
2173 
2174 	return ret;
2175 }
2176 // RAP end
2177 
2178 /* securedisplay start */
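/*
 * Load the securedisplay TA and probe it with the QUERY_TA command.  A
 * failed invocation tears the context down again, while a non-success TA
 * status zeroes the binary size so that no further load attempt is made.
 */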
2179 static int psp_securedisplay_initialize(struct psp_context *psp)
2180 {
2181 	int ret;
2182 	struct ta_securedisplay_cmd *securedisplay_cmd;
2183 
2184 	/*
2185 	 * TODO: bypass the initialize in sriov for now
2186 	 */
2187 	if (amdgpu_sriov_vf(psp->adev))
2188 		return 0;
2189 
2190 	/* bypass securedisplay initialization if dmu is harvested */
2191 	if (!amdgpu_device_has_display_hardware(psp->adev))
2192 		return 0;
2193 
2194 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2195 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2196 		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2197 		return 0;
2198 	}
2199 
2200 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2201 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2202 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2203 
2204 	if (!psp->securedisplay_context.context.initialized) {
2205 		ret = psp_ta_init_shared_buf(psp,
2206 					     &psp->securedisplay_context.context.mem_context);
2207 		if (ret)
2208 			return ret;
2209 	}
2210 
2211 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2212 	if (!ret) {
2213 		psp->securedisplay_context.context.initialized = true;
2214 		mutex_init(&psp->securedisplay_context.mutex);
2215 	} else
2216 		return ret;
2217 
2218 	mutex_lock(&psp->securedisplay_context.mutex);
2219 
2220 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2221 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2222 
2223 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2224 
2225 	mutex_unlock(&psp->securedisplay_context.mutex);
2226 
2227 	if (ret) {
2228 		psp_securedisplay_terminate(psp);
2229 		/* free securedisplay shared memory */
2230 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2231 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2232 		return -EINVAL;
2233 	}
2234 
2235 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2236 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2237 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2238 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2239 		/* don't try again */
2240 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2241 	}
2242 
2243 	return 0;
2244 }
2245 
2246 static int psp_securedisplay_terminate(struct psp_context *psp)
2247 {
2248 	int ret;
2249 
2250 	/*
2251 	 * TODO: bypass the terminate in sriov for now
2252 	 */
2253 	if (amdgpu_sriov_vf(psp->adev))
2254 		return 0;
2255 
2256 	if (!psp->securedisplay_context.context.initialized)
2257 		return 0;
2258 
2259 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2260 
2261 	psp->securedisplay_context.context.initialized = false;
2262 
2263 	return ret;
2264 }
2265 
2266 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2267 {
2268 	int ret;
2269 
2270 	if (!psp->securedisplay_context.context.initialized)
2271 		return -EINVAL;
2272 
2273 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2274 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2275 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2276 		return -EINVAL;
2277 
2278 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2279 
2280 	return ret;
2281 }
2282 /* SECUREDISPLAY end */
2283 
2284 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2285 {
2286 	struct psp_context *psp = &adev->psp;
2287 	int ret = 0;
2288 
2289 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2290 		ret = psp->funcs->wait_for_bootloader(psp);
2291 
2292 	return ret;
2293 }
2294 
2295 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2296 {
2297 	if (psp->funcs &&
2298 	    psp->funcs->get_ras_capability) {
2299 		return psp->funcs->get_ras_capability(psp);
2300 	} else {
2301 		return false;
2302 	}
2303 }
2304 
2305 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2306 {
2307 	struct psp_context *psp = &adev->psp;
2308 
2309 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2310 		return false;
2311 
2312 	if (psp->funcs && psp->funcs->is_reload_needed)
2313 		return psp->funcs->is_reload_needed(psp);
2314 
2315 	return false;
2316 }
2317 
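/*
 * Bring the PSP up: on bare metal, load every bootloader component present
 * in the firmware image (KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV,
 * RAS_DRV, IPKEYMGR_DRV, SPDM_DRV and finally SOS), then create the KM ring
 * and set up the TMR either before or after the SMU firmware, depending on
 * whether DF C-state management is centralized in the PMFW.
 */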
2318 static int psp_hw_start(struct psp_context *psp)
2319 {
2320 	struct amdgpu_device *adev = psp->adev;
2321 	int ret;
2322 
2323 	if (!amdgpu_sriov_vf(adev)) {
2324 		if ((is_psp_fw_valid(psp->kdb)) &&
2325 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2326 			ret = psp_bootloader_load_kdb(psp);
2327 			if (ret) {
2328 				dev_err(adev->dev, "PSP load kdb failed!\n");
2329 				return ret;
2330 			}
2331 		}
2332 
2333 		if ((is_psp_fw_valid(psp->spl)) &&
2334 		    (psp->funcs->bootloader_load_spl != NULL)) {
2335 			ret = psp_bootloader_load_spl(psp);
2336 			if (ret) {
2337 				dev_err(adev->dev, "PSP load spl failed!\n");
2338 				return ret;
2339 			}
2340 		}
2341 
2342 		if ((is_psp_fw_valid(psp->sys)) &&
2343 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2344 			ret = psp_bootloader_load_sysdrv(psp);
2345 			if (ret) {
2346 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2347 				return ret;
2348 			}
2349 		}
2350 
2351 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2352 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2353 			ret = psp_bootloader_load_soc_drv(psp);
2354 			if (ret) {
2355 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2356 				return ret;
2357 			}
2358 		}
2359 
2360 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2361 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2362 			ret = psp_bootloader_load_intf_drv(psp);
2363 			if (ret) {
2364 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2365 				return ret;
2366 			}
2367 		}
2368 
2369 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2370 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2371 			ret = psp_bootloader_load_dbg_drv(psp);
2372 			if (ret) {
2373 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2374 				return ret;
2375 			}
2376 		}
2377 
2378 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2379 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2380 			ret = psp_bootloader_load_ras_drv(psp);
2381 			if (ret) {
2382 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2383 				return ret;
2384 			}
2385 		}
2386 
2387 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2388 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2389 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2390 			if (ret) {
2391 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2392 				return ret;
2393 			}
2394 		}
2395 
2396 		if ((is_psp_fw_valid(psp->spdm_drv)) &&
2397 		    (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2398 			ret = psp_bootloader_load_spdm_drv(psp);
2399 			if (ret) {
2400 				dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2401 				return ret;
2402 			}
2403 		}
2404 
2405 		if ((is_psp_fw_valid(psp->sos)) &&
2406 		    (psp->funcs->bootloader_load_sos != NULL)) {
2407 			ret = psp_bootloader_load_sos(psp);
2408 			if (ret) {
2409 				dev_err(adev->dev, "PSP load sos failed!\n");
2410 				return ret;
2411 			}
2412 		}
2413 	}
2414 
2415 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2416 	if (ret) {
2417 		dev_err(adev->dev, "PSP create ring failed!\n");
2418 		return ret;
2419 	}
2420 
2421 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2422 		goto skip_pin_bo;
2423 
2424 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2425 		ret = psp_tmr_init(psp);
2426 		if (ret) {
2427 			dev_err(adev->dev, "PSP tmr init failed!\n");
2428 			return ret;
2429 		}
2430 	}
2431 
2432 skip_pin_bo:
2433 	/*
2434 	 * For ASICs with DF Cstate management centralized
2435 	 * in the PMFW, TMR setup should be performed after the
2436 	 * PMFW is loaded and before other non-PSP firmware is loaded.
2437 	 */
2438 	if (psp->pmfw_centralized_cstate_management) {
2439 		ret = psp_load_smu_fw(psp);
2440 		if (ret)
2441 			return ret;
2442 	}
2443 
2444 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2445 		ret = psp_tmr_load(psp);
2446 		if (ret) {
2447 			dev_err(adev->dev, "PSP load tmr failed!\n");
2448 			return ret;
2449 		}
2450 	}
2451 
2452 	return 0;
2453 }
2454 
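/* Map an amdgpu ucode id to the PSP GFX firmware type used in LOAD_IP_FW. */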
2455 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2456 			   enum psp_gfx_fw_type *type)
2457 {
2458 	switch (ucode->ucode_id) {
2459 	case AMDGPU_UCODE_ID_CAP:
2460 		*type = GFX_FW_TYPE_CAP;
2461 		break;
2462 	case AMDGPU_UCODE_ID_SDMA0:
2463 		*type = GFX_FW_TYPE_SDMA0;
2464 		break;
2465 	case AMDGPU_UCODE_ID_SDMA1:
2466 		*type = GFX_FW_TYPE_SDMA1;
2467 		break;
2468 	case AMDGPU_UCODE_ID_SDMA2:
2469 		*type = GFX_FW_TYPE_SDMA2;
2470 		break;
2471 	case AMDGPU_UCODE_ID_SDMA3:
2472 		*type = GFX_FW_TYPE_SDMA3;
2473 		break;
2474 	case AMDGPU_UCODE_ID_SDMA4:
2475 		*type = GFX_FW_TYPE_SDMA4;
2476 		break;
2477 	case AMDGPU_UCODE_ID_SDMA5:
2478 		*type = GFX_FW_TYPE_SDMA5;
2479 		break;
2480 	case AMDGPU_UCODE_ID_SDMA6:
2481 		*type = GFX_FW_TYPE_SDMA6;
2482 		break;
2483 	case AMDGPU_UCODE_ID_SDMA7:
2484 		*type = GFX_FW_TYPE_SDMA7;
2485 		break;
2486 	case AMDGPU_UCODE_ID_CP_MES:
2487 		*type = GFX_FW_TYPE_CP_MES;
2488 		break;
2489 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2490 		*type = GFX_FW_TYPE_MES_STACK;
2491 		break;
2492 	case AMDGPU_UCODE_ID_CP_MES1:
2493 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2494 		break;
2495 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2496 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2497 		break;
2498 	case AMDGPU_UCODE_ID_CP_CE:
2499 		*type = GFX_FW_TYPE_CP_CE;
2500 		break;
2501 	case AMDGPU_UCODE_ID_CP_PFP:
2502 		*type = GFX_FW_TYPE_CP_PFP;
2503 		break;
2504 	case AMDGPU_UCODE_ID_CP_ME:
2505 		*type = GFX_FW_TYPE_CP_ME;
2506 		break;
2507 	case AMDGPU_UCODE_ID_CP_MEC1:
2508 		*type = GFX_FW_TYPE_CP_MEC;
2509 		break;
2510 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2511 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2512 		break;
2513 	case AMDGPU_UCODE_ID_CP_MEC2:
2514 		*type = GFX_FW_TYPE_CP_MEC;
2515 		break;
2516 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2517 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2518 		break;
2519 	case AMDGPU_UCODE_ID_RLC_P:
2520 		*type = GFX_FW_TYPE_RLC_P;
2521 		break;
2522 	case AMDGPU_UCODE_ID_RLC_V:
2523 		*type = GFX_FW_TYPE_RLC_V;
2524 		break;
2525 	case AMDGPU_UCODE_ID_RLC_G:
2526 		*type = GFX_FW_TYPE_RLC_G;
2527 		break;
2528 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2529 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2530 		break;
2531 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2532 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2533 		break;
2534 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2535 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2536 		break;
2537 	case AMDGPU_UCODE_ID_RLC_IRAM:
2538 		*type = GFX_FW_TYPE_RLC_IRAM;
2539 		break;
2540 	case AMDGPU_UCODE_ID_RLC_DRAM:
2541 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2542 		break;
2543 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2544 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2545 		break;
2546 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2547 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2548 		break;
2549 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2550 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2551 		break;
2552 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2553 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2554 		break;
2555 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2556 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2557 		break;
2558 	case AMDGPU_UCODE_ID_SMC:
2559 		*type = GFX_FW_TYPE_SMU;
2560 		break;
2561 	case AMDGPU_UCODE_ID_PPTABLE:
2562 		*type = GFX_FW_TYPE_PPTABLE;
2563 		break;
2564 	case AMDGPU_UCODE_ID_UVD:
2565 		*type = GFX_FW_TYPE_UVD;
2566 		break;
2567 	case AMDGPU_UCODE_ID_UVD1:
2568 		*type = GFX_FW_TYPE_UVD1;
2569 		break;
2570 	case AMDGPU_UCODE_ID_VCE:
2571 		*type = GFX_FW_TYPE_VCE;
2572 		break;
2573 	case AMDGPU_UCODE_ID_VCN:
2574 		*type = GFX_FW_TYPE_VCN;
2575 		break;
2576 	case AMDGPU_UCODE_ID_VCN1:
2577 		*type = GFX_FW_TYPE_VCN1;
2578 		break;
2579 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2580 		*type = GFX_FW_TYPE_DMCU_ERAM;
2581 		break;
2582 	case AMDGPU_UCODE_ID_DMCU_INTV:
2583 		*type = GFX_FW_TYPE_DMCU_ISR;
2584 		break;
2585 	case AMDGPU_UCODE_ID_VCN0_RAM:
2586 		*type = GFX_FW_TYPE_VCN0_RAM;
2587 		break;
2588 	case AMDGPU_UCODE_ID_VCN1_RAM:
2589 		*type = GFX_FW_TYPE_VCN1_RAM;
2590 		break;
2591 	case AMDGPU_UCODE_ID_DMCUB:
2592 		*type = GFX_FW_TYPE_DMUB;
2593 		break;
2594 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2595 	case AMDGPU_UCODE_ID_SDMA_RS64:
2596 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2597 		break;
2598 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2599 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2600 		break;
2601 	case AMDGPU_UCODE_ID_IMU_I:
2602 		*type = GFX_FW_TYPE_IMU_I;
2603 		break;
2604 	case AMDGPU_UCODE_ID_IMU_D:
2605 		*type = GFX_FW_TYPE_IMU_D;
2606 		break;
2607 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2608 		*type = GFX_FW_TYPE_RS64_PFP;
2609 		break;
2610 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2611 		*type = GFX_FW_TYPE_RS64_ME;
2612 		break;
2613 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2614 		*type = GFX_FW_TYPE_RS64_MEC;
2615 		break;
2616 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2617 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2618 		break;
2619 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2620 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2621 		break;
2622 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2623 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2624 		break;
2625 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2626 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2627 		break;
2628 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2629 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2630 		break;
2631 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2632 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2633 		break;
2634 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2635 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2636 		break;
2637 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2638 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2639 		break;
2640 	case AMDGPU_UCODE_ID_VPE_CTX:
2641 		*type = GFX_FW_TYPE_VPEC_FW1;
2642 		break;
2643 	case AMDGPU_UCODE_ID_VPE_CTL:
2644 		*type = GFX_FW_TYPE_VPEC_FW2;
2645 		break;
2646 	case AMDGPU_UCODE_ID_VPE:
2647 		*type = GFX_FW_TYPE_VPE;
2648 		break;
2649 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2650 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2651 		break;
2652 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2653 		*type = GFX_FW_TYPE_UMSCH_DATA;
2654 		break;
2655 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2656 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2657 		break;
2658 	case AMDGPU_UCODE_ID_P2S_TABLE:
2659 		*type = GFX_FW_TYPE_P2S_TABLE;
2660 		break;
2661 	case AMDGPU_UCODE_ID_JPEG_RAM:
2662 		*type = GFX_FW_TYPE_JPEG_RAM;
2663 		break;
2664 	case AMDGPU_UCODE_ID_ISP:
2665 		*type = GFX_FW_TYPE_ISP;
2666 		break;
2667 	case AMDGPU_UCODE_ID_MAXIMUM:
2668 	default:
2669 		return -EINVAL;
2670 	}
2671 
2672 	return 0;
2673 }
2674 
2675 static void psp_print_fw_hdr(struct psp_context *psp,
2676 			     struct amdgpu_firmware_info *ucode)
2677 {
2678 	struct amdgpu_device *adev = psp->adev;
2679 	struct common_firmware_header *hdr;
2680 
2681 	switch (ucode->ucode_id) {
2682 	case AMDGPU_UCODE_ID_SDMA0:
2683 	case AMDGPU_UCODE_ID_SDMA1:
2684 	case AMDGPU_UCODE_ID_SDMA2:
2685 	case AMDGPU_UCODE_ID_SDMA3:
2686 	case AMDGPU_UCODE_ID_SDMA4:
2687 	case AMDGPU_UCODE_ID_SDMA5:
2688 	case AMDGPU_UCODE_ID_SDMA6:
2689 	case AMDGPU_UCODE_ID_SDMA7:
2690 		hdr = (struct common_firmware_header *)
2691 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2692 		amdgpu_ucode_print_sdma_hdr(hdr);
2693 		break;
2694 	case AMDGPU_UCODE_ID_CP_CE:
2695 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2696 		amdgpu_ucode_print_gfx_hdr(hdr);
2697 		break;
2698 	case AMDGPU_UCODE_ID_CP_PFP:
2699 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2700 		amdgpu_ucode_print_gfx_hdr(hdr);
2701 		break;
2702 	case AMDGPU_UCODE_ID_CP_ME:
2703 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2704 		amdgpu_ucode_print_gfx_hdr(hdr);
2705 		break;
2706 	case AMDGPU_UCODE_ID_CP_MEC1:
2707 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2708 		amdgpu_ucode_print_gfx_hdr(hdr);
2709 		break;
2710 	case AMDGPU_UCODE_ID_RLC_G:
2711 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2712 		amdgpu_ucode_print_rlc_hdr(hdr);
2713 		break;
2714 	case AMDGPU_UCODE_ID_SMC:
2715 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2716 		amdgpu_ucode_print_smc_hdr(hdr);
2717 		break;
2718 	default:
2719 		break;
2720 	}
2721 }
2722 
2723 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2724 				       struct amdgpu_firmware_info *ucode,
2725 				       struct psp_gfx_cmd_resp *cmd)
2726 {
2727 	int ret;
2728 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2729 
2730 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2731 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2732 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2733 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2734 
2735 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2736 	if (ret)
2737 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2738 
2739 	return ret;
2740 }
2741 
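/*
 * Issue a single GFX_CMD_ID_LOAD_IP_FW command for the given ucode: acquire
 * the shared command buffer, fill in the firmware MC address, size and PSP
 * firmware type, submit it against the fence buffer, and release the buffer
 * again regardless of the outcome.
 */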
2742 int psp_execute_ip_fw_load(struct psp_context *psp,
2743 			   struct amdgpu_firmware_info *ucode)
2744 {
2745 	int ret = 0;
2746 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2747 
2748 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2749 	if (!ret) {
2750 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2751 					 psp->fence_buf_mc_addr);
2752 	}
2753 
2754 	release_psp_cmd_buf(psp);
2755 
2756 	return ret;
2757 }
2758 
2759 static int psp_load_p2s_table(struct psp_context *psp)
2760 {
2761 	int ret;
2762 	struct amdgpu_device *adev = psp->adev;
2763 	struct amdgpu_firmware_info *ucode =
2764 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2765 
2766 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2767 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2768 		return 0;
2769 
2770 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2771 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2772 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2773 								0x0036003C;
2774 		if (psp->sos.fw_version < supp_vers)
2775 			return 0;
2776 	}
2777 
2778 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2779 		return 0;
2780 
2781 	ret = psp_execute_ip_fw_load(psp, ucode);
2782 
2783 	return ret;
2784 }
2785 
2786 static int psp_load_smu_fw(struct psp_context *psp)
2787 {
2788 	int ret;
2789 	struct amdgpu_device *adev = psp->adev;
2790 	struct amdgpu_firmware_info *ucode =
2791 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2792 	struct amdgpu_ras *ras = psp->ras_context.ras;
2793 
2794 	/*
2795 	 * Skip SMU FW reloading when BACO/BAMACO is used for runtime PM,
2796 	 * as the SMU stays alive.
2797 	 */
2798 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2799 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2800 		return 0;
2801 
2802 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2803 		return 0;
2804 
2805 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2806 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2807 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2808 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2809 		if (ret)
2810 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2811 	}
2812 
2813 	ret = psp_execute_ip_fw_load(psp, ucode);
2814 
2815 	if (ret)
2816 		dev_err(adev->dev, "PSP load smu failed!\n");
2817 
2818 	return ret;
2819 }
2820 
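/*
 * Decide whether a ucode entry should not be sent to the PSP: empty entries,
 * the P2S table (loaded separately), SMU firmware when it is reloaded by
 * another path, firmware the hypervisor handles under SR-IOV, and MEC JT
 * images when RLC autoload covers them.
 */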
2821 static bool fw_load_skip_check(struct psp_context *psp,
2822 			       struct amdgpu_firmware_info *ucode)
2823 {
2824 	if (!ucode->fw || !ucode->ucode_size)
2825 		return true;
2826 
2827 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2828 		return true;
2829 
2830 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2831 	    (psp_smu_reload_quirk(psp) ||
2832 	     psp->autoload_supported ||
2833 	     psp->pmfw_centralized_cstate_management))
2834 		return true;
2835 
2836 	if (amdgpu_sriov_vf(psp->adev) &&
2837 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2838 		return true;
2839 
2840 	if (psp->autoload_supported &&
2841 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2842 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2843 		/* skip mec JT when autoload is enabled */
2844 		return true;
2845 
2846 	return false;
2847 }
2848 
2849 int psp_load_fw_list(struct psp_context *psp,
2850 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
2851 {
2852 	int ret = 0, i;
2853 	struct amdgpu_firmware_info *ucode;
2854 
2855 	for (i = 0; i < ucode_count; ++i) {
2856 		ucode = ucode_list[i];
2857 		psp_print_fw_hdr(psp, ucode);
2858 		ret = psp_execute_ip_fw_load(psp, ucode);
2859 		if (ret)
2860 			return ret;
2861 	}
2862 	return ret;
2863 }
2864 
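/*
 * Walk the ucode table and hand every firmware that the PSP does not load
 * by itself to psp_execute_ip_fw_load().  SMU firmware is loaded first when
 * autoload is used without centralized C-state management, entries rejected
 * by fw_load_skip_check() are skipped, and RLC autoload is started once the
 * last piece of gfx firmware has been received.
 */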
2865 static int psp_load_non_psp_fw(struct psp_context *psp)
2866 {
2867 	int i, ret;
2868 	struct amdgpu_firmware_info *ucode;
2869 	struct amdgpu_device *adev = psp->adev;
2870 
2871 	if (psp->autoload_supported &&
2872 	    !psp->pmfw_centralized_cstate_management) {
2873 		ret = psp_load_smu_fw(psp);
2874 		if (ret)
2875 			return ret;
2876 	}
2877 
2878 	/* Load P2S table first if it's available */
2879 	psp_load_p2s_table(psp);
2880 
2881 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
2882 		ucode = &adev->firmware.ucode[i];
2883 
2884 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2885 		    !fw_load_skip_check(psp, ucode)) {
2886 			ret = psp_load_smu_fw(psp);
2887 			if (ret)
2888 				return ret;
2889 			continue;
2890 		}
2891 
2892 		if (fw_load_skip_check(psp, ucode))
2893 			continue;
2894 
2895 		if (psp->autoload_supported &&
2896 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2897 			     IP_VERSION(11, 0, 7) ||
2898 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2899 			     IP_VERSION(11, 0, 11) ||
2900 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2901 			     IP_VERSION(11, 0, 12)) &&
2902 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2903 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2904 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2905 			/* PSP only receives one SDMA fw for sienna_cichlid,
2906 			 * as all four SDMA firmwares are the same
2907 			 */
2908 			continue;
2909 
2910 		psp_print_fw_hdr(psp, ucode);
2911 
2912 		ret = psp_execute_ip_fw_load(psp, ucode);
2913 		if (ret)
2914 			return ret;
2915 
2916 		/* Start RLC autoload after the PSP has received all the gfx firmware */
2917 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2918 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2919 			ret = psp_rlc_autoload_start(psp);
2920 			if (ret) {
2921 				dev_err(adev->dev, "Failed to start rlc autoload\n");
2922 				return ret;
2923 			}
2924 		}
2925 	}
2926 
2927 	return 0;
2928 }
2929 
2930 static int psp_load_fw(struct amdgpu_device *adev)
2931 {
2932 	int ret;
2933 	struct psp_context *psp = &adev->psp;
2934 
2935 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2936 		/* should not destroy ring, only stop */
2937 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
2938 	} else {
2939 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2940 
2941 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2942 		if (ret) {
2943 			dev_err(adev->dev, "PSP ring init failed!\n");
2944 			goto failed;
2945 		}
2946 	}
2947 
2948 	ret = psp_hw_start(psp);
2949 	if (ret)
2950 		goto failed;
2951 
2952 	ret = psp_load_non_psp_fw(psp);
2953 	if (ret)
2954 		goto failed1;
2955 
2956 	ret = psp_asd_initialize(psp);
2957 	if (ret) {
2958 		dev_err(adev->dev, "PSP load asd failed!\n");
2959 		goto failed1;
2960 	}
2961 
2962 	ret = psp_rl_load(adev);
2963 	if (ret) {
2964 		dev_err(adev->dev, "PSP load RL failed!\n");
2965 		goto failed1;
2966 	}
2967 
2968 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2969 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
2970 			ret = psp_xgmi_initialize(psp, false, true);
2971 			/* Warn on XGMI session initialization failure
2972 			 * instead of stopping driver initialization
2973 			 */
2974 			if (ret)
2975 				dev_err(psp->adev->dev,
2976 					"XGMI: Failed to initialize XGMI session\n");
2977 		}
2978 	}
2979 
2980 	if (psp->ta_fw) {
2981 		ret = psp_ras_initialize(psp);
2982 		if (ret)
2983 			dev_err(psp->adev->dev,
2984 				"RAS: Failed to initialize RAS\n");
2985 
2986 		ret = psp_hdcp_initialize(psp);
2987 		if (ret)
2988 			dev_err(psp->adev->dev,
2989 				"HDCP: Failed to initialize HDCP\n");
2990 
2991 		ret = psp_dtm_initialize(psp);
2992 		if (ret)
2993 			dev_err(psp->adev->dev,
2994 				"DTM: Failed to initialize DTM\n");
2995 
2996 		ret = psp_rap_initialize(psp);
2997 		if (ret)
2998 			dev_err(psp->adev->dev,
2999 				"RAP: Failed to initialize RAP\n");
3000 
3001 		ret = psp_securedisplay_initialize(psp);
3002 		if (ret)
3003 			dev_err(psp->adev->dev,
3004 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3005 	}
3006 
3007 	return 0;
3008 
3009 failed1:
3010 	psp_free_shared_bufs(psp);
3011 failed:
3012 	/*
3013 	 * all cleanup jobs (xgmi terminate, ras terminate,
3014 	 * ring destroy, cmd/fence/fw buffers destroy,
3015 	 * psp->cmd destroy) are delayed to psp_hw_fini
3016 	 */
3017 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3018 	return ret;
3019 }
3020 
3021 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3022 {
3023 	int ret;
3024 	struct amdgpu_device *adev = ip_block->adev;
3025 
3026 	mutex_lock(&adev->firmware.mutex);
3027 
3028 	ret = amdgpu_ucode_init_bo(adev);
3029 	if (ret)
3030 		goto failed;
3031 
3032 	ret = psp_load_fw(adev);
3033 	if (ret) {
3034 		dev_err(adev->dev, "PSP firmware loading failed\n");
3035 		goto failed;
3036 	}
3037 
3038 	mutex_unlock(&adev->firmware.mutex);
3039 	return 0;
3040 
3041 failed:
3042 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3043 	mutex_unlock(&adev->firmware.mutex);
3044 	return -EINVAL;
3045 }
3046 
3047 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3048 {
3049 	struct amdgpu_device *adev = ip_block->adev;
3050 	struct psp_context *psp = &adev->psp;
3051 
3052 	if (psp->ta_fw) {
3053 		psp_ras_terminate(psp);
3054 		psp_securedisplay_terminate(psp);
3055 		psp_rap_terminate(psp);
3056 		psp_dtm_terminate(psp);
3057 		psp_hdcp_terminate(psp);
3058 
3059 		if (adev->gmc.xgmi.num_physical_nodes > 1)
3060 			psp_xgmi_terminate(psp);
3061 	}
3062 
3063 	psp_asd_terminate(psp);
3064 	psp_tmr_terminate(psp);
3065 
3066 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3067 
3068 	return 0;
3069 }
3070 
3071 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3072 {
3073 	int ret = 0;
3074 	struct amdgpu_device *adev = ip_block->adev;
3075 	struct psp_context *psp = &adev->psp;
3076 
3077 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3078 	    psp->xgmi_context.context.initialized) {
3079 		ret = psp_xgmi_terminate(psp);
3080 		if (ret) {
3081 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3082 			goto out;
3083 		}
3084 	}
3085 
3086 	if (psp->ta_fw) {
3087 		ret = psp_ras_terminate(psp);
3088 		if (ret) {
3089 			dev_err(adev->dev, "Failed to terminate ras ta\n");
3090 			goto out;
3091 		}
3092 		ret = psp_hdcp_terminate(psp);
3093 		if (ret) {
3094 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3095 			goto out;
3096 		}
3097 		ret = psp_dtm_terminate(psp);
3098 		if (ret) {
3099 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3100 			goto out;
3101 		}
3102 		ret = psp_rap_terminate(psp);
3103 		if (ret) {
3104 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3105 			goto out;
3106 		}
3107 		ret = psp_securedisplay_terminate(psp);
3108 		if (ret) {
3109 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3110 			goto out;
3111 		}
3112 	}
3113 
3114 	ret = psp_asd_terminate(psp);
3115 	if (ret) {
3116 		dev_err(adev->dev, "Failed to terminate asd\n");
3117 		goto out;
3118 	}
3119 
3120 	ret = psp_tmr_terminate(psp);
3121 	if (ret) {
3122 		dev_err(adev->dev, "Failed to terminate tmr\n");
3123 		goto out;
3124 	}
3125 
3126 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3127 	if (ret)
3128 		dev_err(adev->dev, "PSP ring stop failed\n");
3129 
3130 out:
3131 	return ret;
3132 }
3133 
3134 static int psp_resume(struct amdgpu_ip_block *ip_block)
3135 {
3136 	int ret;
3137 	struct amdgpu_device *adev = ip_block->adev;
3138 	struct psp_context *psp = &adev->psp;
3139 
3140 	dev_info(adev->dev, "PSP is resuming...\n");
3141 
3142 	if (psp->mem_train_ctx.enable_mem_training) {
3143 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3144 		if (ret) {
3145 			dev_err(adev->dev, "Failed to process memory training!\n");
3146 			return ret;
3147 		}
3148 	}
3149 
3150 	mutex_lock(&adev->firmware.mutex);
3151 
3152 	ret = amdgpu_ucode_init_bo(adev);
3153 	if (ret)
3154 		goto failed;
3155 
3156 	ret = psp_hw_start(psp);
3157 	if (ret)
3158 		goto failed;
3159 
3160 	ret = psp_load_non_psp_fw(psp);
3161 	if (ret)
3162 		goto failed;
3163 
3164 	ret = psp_asd_initialize(psp);
3165 	if (ret) {
3166 		dev_err(adev->dev, "PSP load asd failed!\n");
3167 		goto failed;
3168 	}
3169 
3170 	ret = psp_rl_load(adev);
3171 	if (ret) {
3172 		dev_err(adev->dev, "PSP load RL failed!\n");
3173 		goto failed;
3174 	}
3175 
3176 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3177 		ret = psp_xgmi_initialize(psp, false, true);
3178 		/* Warn on XGMI session initialization failure
3179 		 * instead of stopping driver initialization
3180 		 */
3181 		if (ret)
3182 			dev_err(psp->adev->dev,
3183 				"XGMI: Failed to initialize XGMI session\n");
3184 	}
3185 
3186 	if (psp->ta_fw) {
3187 		ret = psp_ras_initialize(psp);
3188 		if (ret)
3189 			dev_err(psp->adev->dev,
3190 				"RAS: Failed to initialize RAS\n");
3191 
3192 		ret = psp_hdcp_initialize(psp);
3193 		if (ret)
3194 			dev_err(psp->adev->dev,
3195 				"HDCP: Failed to initialize HDCP\n");
3196 
3197 		ret = psp_dtm_initialize(psp);
3198 		if (ret)
3199 			dev_err(psp->adev->dev,
3200 				"DTM: Failed to initialize DTM\n");
3201 
3202 		ret = psp_rap_initialize(psp);
3203 		if (ret)
3204 			dev_err(psp->adev->dev,
3205 				"RAP: Failed to initialize RAP\n");
3206 
3207 		ret = psp_securedisplay_initialize(psp);
3208 		if (ret)
3209 			dev_err(psp->adev->dev,
3210 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3211 	}
3212 
3213 	mutex_unlock(&adev->firmware.mutex);
3214 
3215 	return 0;
3216 
3217 failed:
3218 	dev_err(adev->dev, "PSP resume failed\n");
3219 	mutex_unlock(&adev->firmware.mutex);
3220 	return ret;
3221 }
3222 
3223 int psp_gpu_reset(struct amdgpu_device *adev)
3224 {
3225 	int ret;
3226 
3227 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3228 		return 0;
3229 
3230 	mutex_lock(&adev->psp.mutex);
3231 	ret = psp_mode1_reset(&adev->psp);
3232 	mutex_unlock(&adev->psp.mutex);
3233 
3234 	return ret;
3235 }
3236 
3237 int psp_rlc_autoload_start(struct psp_context *psp)
3238 {
3239 	int ret;
3240 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3241 
3242 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3243 
3244 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3245 				 psp->fence_buf_mc_addr);
3246 
3247 	release_psp_cmd_buf(psp);
3248 
3249 	return ret;
3250 }
3251 
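/*
 * Write one GPCOM ring buffer frame.  The hardware write pointer counts in
 * DWORDs, so the target slot is the pointer divided by the frame size in
 * DWORDs, wrapping at the ring size; the frame carries the MC addresses of
 * the command and fence buffers plus the fence value used to detect
 * completion.
 */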
3252 int psp_ring_cmd_submit(struct psp_context *psp,
3253 			uint64_t cmd_buf_mc_addr,
3254 			uint64_t fence_mc_addr,
3255 			int index)
3256 {
3257 	unsigned int psp_write_ptr_reg = 0;
3258 	struct psp_gfx_rb_frame *write_frame;
3259 	struct psp_ring *ring = &psp->km_ring;
3260 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3261 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3262 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3263 	struct amdgpu_device *adev = psp->adev;
3264 	uint32_t ring_size_dw = ring->ring_size / 4;
3265 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3266 
3267 	/* KM (GPCOM) prepare write pointer */
3268 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3269 
3270 	/* Update KM RB frame pointer to new frame */
3271 	/* write_frame ptr increments by size of rb_frame in bytes */
3272 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3273 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3274 		write_frame = ring_buffer_start;
3275 	else
3276 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3277 	/* Check invalid write_frame ptr address */
3278 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3279 		dev_err(adev->dev,
3280 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3281 			ring_buffer_start, ring_buffer_end, write_frame);
3282 		dev_err(adev->dev,
3283 			"write_frame is pointing to address out of bounds\n");
3284 		return -EINVAL;
3285 	}
3286 
3287 	/* Initialize KM RB frame */
3288 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3289 
3290 	/* Update KM RB frame */
3291 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3292 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3293 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3294 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3295 	write_frame->fence_value = index;
3296 	amdgpu_device_flush_hdp(adev, NULL);
3297 
3298 	/* Update the write pointer in DWORDs */
3299 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3300 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3301 	return 0;
3302 }
3303 
3304 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3305 {
3306 	struct amdgpu_device *adev = psp->adev;
3307 	const struct psp_firmware_header_v1_0 *asd_hdr;
3308 	int err = 0;
3309 
3310 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3311 				   "amdgpu/%s_asd.bin", chip_name);
3312 	if (err)
3313 		goto out;
3314 
3315 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3316 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3317 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3318 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3319 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3320 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3321 	return 0;
3322 out:
3323 	amdgpu_ucode_release(&adev->psp.asd_fw);
3324 	return err;
3325 }
3326 
3327 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3328 {
3329 	struct amdgpu_device *adev = psp->adev;
3330 	const struct psp_firmware_header_v1_0 *toc_hdr;
3331 	int err = 0;
3332 
3333 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3334 				   "amdgpu/%s_toc.bin", chip_name);
3335 	if (err)
3336 		goto out;
3337 
3338 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3339 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3340 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3341 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3342 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3343 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3344 	return 0;
3345 out:
3346 	amdgpu_ucode_release(&adev->psp.toc_fw);
3347 	return err;
3348 }
3349 
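/*
 * Decode one packed SOS binary descriptor (header v2.x).  The image start
 * address is the descriptor offset added to the header's ucode array offset,
 * and the version/size/start fields of the matching psp context image (sos,
 * sys, kdb, toc, spl, rl or one of the *_drv entries) are filled in from it.
 */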
3350 static int parse_sos_bin_descriptor(struct psp_context *psp,
3351 				   const struct psp_fw_bin_desc *desc,
3352 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3353 {
3354 	uint8_t *ucode_start_addr  = NULL;
3355 
3356 	if (!psp || !desc || !sos_hdr)
3357 		return -EINVAL;
3358 
3359 	ucode_start_addr  = (uint8_t *)sos_hdr +
3360 			    le32_to_cpu(desc->offset_bytes) +
3361 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3362 
3363 	switch (desc->fw_type) {
3364 	case PSP_FW_TYPE_PSP_SOS:
3365 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3366 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3367 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3368 		psp->sos.start_addr	   = ucode_start_addr;
3369 		break;
3370 	case PSP_FW_TYPE_PSP_SYS_DRV:
3371 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3372 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3373 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3374 		psp->sys.start_addr        = ucode_start_addr;
3375 		break;
3376 	case PSP_FW_TYPE_PSP_KDB:
3377 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3378 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3379 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3380 		psp->kdb.start_addr        = ucode_start_addr;
3381 		break;
3382 	case PSP_FW_TYPE_PSP_TOC:
3383 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3384 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3385 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3386 		psp->toc.start_addr        = ucode_start_addr;
3387 		break;
3388 	case PSP_FW_TYPE_PSP_SPL:
3389 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3390 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3391 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3392 		psp->spl.start_addr        = ucode_start_addr;
3393 		break;
3394 	case PSP_FW_TYPE_PSP_RL:
3395 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3396 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3397 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3398 		psp->rl.start_addr         = ucode_start_addr;
3399 		break;
3400 	case PSP_FW_TYPE_PSP_SOC_DRV:
3401 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3402 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3403 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3404 		psp->soc_drv.start_addr         = ucode_start_addr;
3405 		break;
3406 	case PSP_FW_TYPE_PSP_INTF_DRV:
3407 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3408 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3409 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3410 		psp->intf_drv.start_addr        = ucode_start_addr;
3411 		break;
3412 	case PSP_FW_TYPE_PSP_DBG_DRV:
3413 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3414 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3415 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3416 		psp->dbg_drv.start_addr         = ucode_start_addr;
3417 		break;
3418 	case PSP_FW_TYPE_PSP_RAS_DRV:
3419 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3420 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3421 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3422 		psp->ras_drv.start_addr         = ucode_start_addr;
3423 		break;
3424 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3425 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3426 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3427 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3428 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3429 		break;
3430 	case PSP_FW_TYPE_PSP_SPDM_DRV:
3431 		psp->spdm_drv.fw_version	= le32_to_cpu(desc->fw_version);
3432 		psp->spdm_drv.feature_version	= le32_to_cpu(desc->fw_version);
3433 		psp->spdm_drv.size_bytes	= le32_to_cpu(desc->size_bytes);
3434 		psp->spdm_drv.start_addr	= ucode_start_addr;
3435 		break;
3436 	default:
3437 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3438 		break;
3439 	}
3440 
3441 	return 0;
3442 }
3443 
3444 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3445 {
3446 	const struct psp_firmware_header_v1_0 *sos_hdr;
3447 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3448 	uint8_t *ucode_array_start_addr;
3449 
3450 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3451 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3452 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3453 
3454 	if (adev->gmc.xgmi.connected_to_cpu ||
3455 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3456 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3457 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3458 
3459 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3460 		adev->psp.sys.start_addr = ucode_array_start_addr;
3461 
3462 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3463 		adev->psp.sos.start_addr = ucode_array_start_addr +
3464 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3465 	} else {
3466 		/* Load alternate PSP SOS FW */
3467 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3468 
3469 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3470 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3471 
3472 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3473 		adev->psp.sys.start_addr = ucode_array_start_addr +
3474 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3475 
3476 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3477 		adev->psp.sos.start_addr = ucode_array_start_addr +
3478 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3479 	}
3480 
3481 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3482 		dev_warn(adev->dev, "PSP SOS FW not available");
3483 		return -EINVAL;
3484 	}
3485 
3486 	return 0;
3487 }
3488 
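/*
 * Request amdgpu/<chip>_sos.bin and parse it according to the header major
 * version: v1.x images carry fixed sub-image fields (toc/kdb/spl/rl), while
 * v2.x images carry an array of psp_fw_bin_desc entries that is walked with
 * parse_sos_bin_descriptor(), optionally starting at the AUX SOS index when
 * an alternate SOS image is required.
 */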
3489 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3490 {
3491 	struct amdgpu_device *adev = psp->adev;
3492 	const struct psp_firmware_header_v1_0 *sos_hdr;
3493 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3494 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3495 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3496 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3497 	const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3498 	int fw_index, fw_bin_count, start_index = 0;
3499 	const struct psp_fw_bin_desc *fw_bin;
3500 	uint8_t *ucode_array_start_addr;
3501 	int err = 0;
3502 
3503 	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3504 				   "amdgpu/%s_sos.bin", chip_name);
3505 	if (err)
3506 		goto out;
3507 
3508 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3509 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3510 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3511 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3512 
3513 	switch (sos_hdr->header.header_version_major) {
3514 	case 1:
3515 		err = psp_init_sos_base_fw(adev);
3516 		if (err)
3517 			goto out;
3518 
3519 		if (sos_hdr->header.header_version_minor == 1) {
3520 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3521 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3522 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3523 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3524 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3525 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3526 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3527 		}
3528 		if (sos_hdr->header.header_version_minor == 2) {
3529 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3530 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3531 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3532 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3533 		}
3534 		if (sos_hdr->header.header_version_minor == 3) {
3535 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3536 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3537 			adev->psp.toc.start_addr = ucode_array_start_addr +
3538 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3539 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3540 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3541 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3542 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3543 			adev->psp.spl.start_addr = ucode_array_start_addr +
3544 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3545 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3546 			adev->psp.rl.start_addr = ucode_array_start_addr +
3547 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3548 		}
3549 		break;
3550 	case 2:
3551 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3552 
3553 		fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3554 
3555 		if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3556 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3557 			err = -EINVAL;
3558 			goto out;
3559 		}
3560 
3561 		if (sos_hdr_v2_0->header.header_version_minor == 1) {
3562 			sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3563 
3564 			fw_bin = sos_hdr_v2_1->psp_fw_bin;
3565 
3566 			if (psp_is_aux_sos_load_required(psp))
3567 				start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3568 			else
3569 				fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3570 
3571 		} else {
3572 			fw_bin = sos_hdr_v2_0->psp_fw_bin;
3573 		}
3574 
3575 		for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3576 			err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3577 						       sos_hdr_v2_0);
3578 			if (err)
3579 				goto out;
3580 		}
3581 		break;
3582 	default:
3583 		dev_err(adev->dev,
3584 			"unsupported psp sos firmware\n");
3585 		err = -EINVAL;
3586 		goto out;
3587 	}
3588 
3589 	return 0;
3590 out:
3591 	amdgpu_ucode_release(&adev->psp.sos_fw);
3592 
3593 	return err;
3594 }
3595 
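/*
 * Check whether a packed TA binary applies to this device. Only the XGMI
 * TAs need filtering today: on MP0 13.0.6, APUs with a new enough TA
 * release use the AUX XGMI TA, everything else uses the regular one.
 */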
3596 static bool is_ta_fw_applicable(struct psp_context *psp,
3597 				const struct psp_fw_bin_desc *desc)
3598 {
3599 	struct amdgpu_device *adev = psp->adev;
3600 	uint32_t fw_version;
3601 
3602 	switch (desc->fw_type) {
3603 	case TA_FW_TYPE_PSP_XGMI:
3604 	case TA_FW_TYPE_PSP_XGMI_AUX:
3605 		/* for now, AUX TA only exists on 13.0.6 ta bin,
3606 		 * from v20.00.0x.14
3607 		 */
3608 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3609 		    IP_VERSION(13, 0, 6)) {
3610 			fw_version = le32_to_cpu(desc->fw_version);
3611 
3612 			if (adev->flags & AMD_IS_APU &&
3613 			    (fw_version & 0xff) >= 0x14)
3614 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3615 			else
3616 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3617 		}
3618 		break;
3619 	default:
3620 		break;
3621 	}
3622 
3623 	return true;
3624 }
3625 
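/*
 * Fill the per-TA bin_desc (version, size, start address) in the psp
 * context from one descriptor of a v2 TA header. Descriptors that are not
 * applicable to this device are skipped; unknown TA types only trigger a
 * warning.
 */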
3626 static int parse_ta_bin_descriptor(struct psp_context *psp,
3627 				   const struct psp_fw_bin_desc *desc,
3628 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3629 {
3630 	uint8_t *ucode_start_addr  = NULL;
3631 
3632 	if (!psp || !desc || !ta_hdr)
3633 		return -EINVAL;
3634 
3635 	if (!is_ta_fw_applicable(psp, desc))
3636 		return 0;
3637 
3638 	ucode_start_addr  = (uint8_t *)ta_hdr +
3639 			    le32_to_cpu(desc->offset_bytes) +
3640 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3641 
3642 	switch (desc->fw_type) {
3643 	case TA_FW_TYPE_PSP_ASD:
3644 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3645 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3646 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3647 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3648 		break;
3649 	case TA_FW_TYPE_PSP_XGMI:
3650 	case TA_FW_TYPE_PSP_XGMI_AUX:
3651 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3652 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3653 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3654 		break;
3655 	case TA_FW_TYPE_PSP_RAS:
3656 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3657 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3658 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3659 		break;
3660 	case TA_FW_TYPE_PSP_HDCP:
3661 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3662 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3663 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3664 		break;
3665 	case TA_FW_TYPE_PSP_DTM:
3666 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3667 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3668 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3669 		break;
3670 	case TA_FW_TYPE_PSP_RAP:
3671 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3672 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3673 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3674 		break;
3675 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3676 		psp->securedisplay_context.context.bin_desc.fw_version =
3677 			le32_to_cpu(desc->fw_version);
3678 		psp->securedisplay_context.context.bin_desc.size_bytes =
3679 			le32_to_cpu(desc->size_bytes);
3680 		psp->securedisplay_context.context.bin_desc.start_addr =
3681 			ucode_start_addr;
3682 		break;
3683 	default:
3684 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3685 		break;
3686 	}
3687 
3688 	return 0;
3689 }
3690 
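/*
 * Parse a legacy v1 TA header, which packs the XGMI, RAS, HDCP, DTM and
 * SECUREDISPLAY TAs at fixed offsets relative to the ucode array.
 */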
3691 static int parse_ta_v1_microcode(struct psp_context *psp)
3692 {
3693 	const struct ta_firmware_header_v1_0 *ta_hdr;
3694 	struct amdgpu_device *adev = psp->adev;
3695 
3696 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3697 
3698 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3699 		return -EINVAL;
3700 
3701 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3702 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3703 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3704 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3705 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3706 		(uint8_t *)ta_hdr +
3707 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3708 
3709 	adev->psp.ras_context.context.bin_desc.fw_version =
3710 		le32_to_cpu(ta_hdr->ras.fw_version);
3711 	adev->psp.ras_context.context.bin_desc.size_bytes =
3712 		le32_to_cpu(ta_hdr->ras.size_bytes);
3713 	adev->psp.ras_context.context.bin_desc.start_addr =
3714 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3715 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3716 
3717 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3718 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3719 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3720 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3721 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3722 		(uint8_t *)ta_hdr +
3723 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3724 
3725 	adev->psp.dtm_context.context.bin_desc.fw_version =
3726 		le32_to_cpu(ta_hdr->dtm.fw_version);
3727 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3728 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3729 	adev->psp.dtm_context.context.bin_desc.start_addr =
3730 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3731 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3732 
3733 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3734 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3735 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3736 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3737 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3738 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3739 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3740 
3741 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3742 
3743 	return 0;
3744 }
3745 
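/*
 * Parse a v2 TA header: validate the packed TA count and hand each
 * descriptor to parse_ta_bin_descriptor().
 */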
3746 static int parse_ta_v2_microcode(struct psp_context *psp)
3747 {
3748 	const struct ta_firmware_header_v2_0 *ta_hdr;
3749 	struct amdgpu_device *adev = psp->adev;
3750 	int err = 0;
3751 	int ta_index = 0;
3752 
3753 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3754 
3755 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3756 		return -EINVAL;
3757 
3758 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3759 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3760 		return -EINVAL;
3761 	}
3762 
3763 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3764 		err = parse_ta_bin_descriptor(psp,
3765 					      &ta_hdr->ta_fw_bin[ta_index],
3766 					      ta_hdr);
3767 		if (err)
3768 			return err;
3769 	}
3770 
3771 	return 0;
3772 }
3773 
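/*
 * Request amdgpu/<chip>_ta.bin and dispatch to the v1 or v2 parser based on
 * the header major version. The firmware is released again on failure.
 */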
3774 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3775 {
3776 	const struct common_firmware_header *hdr;
3777 	struct amdgpu_device *adev = psp->adev;
3778 	int err;
3779 
3780 	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3781 				   "amdgpu/%s_ta.bin", chip_name);
3782 	if (err)
3783 		return err;
3784 
3785 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3786 	switch (le16_to_cpu(hdr->header_version_major)) {
3787 	case 1:
3788 		err = parse_ta_v1_microcode(psp);
3789 		break;
3790 	case 2:
3791 		err = parse_ta_v2_microcode(psp);
3792 		break;
3793 	default:
3794 		dev_err(adev->dev, "unsupported TA header version\n");
3795 		err = -EINVAL;
3796 	}
3797 
3798 	if (err)
3799 		amdgpu_ucode_release(&adev->psp.ta_fw);
3800 
3801 	return err;
3802 }
3803 
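/*
 * Request the optional amdgpu/<chip>_cap.bin used under SR-IOV. A missing
 * file is not an error; any other failure is reported to the caller.
 */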
3804 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3805 {
3806 	struct amdgpu_device *adev = psp->adev;
3807 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3808 	struct amdgpu_firmware_info *info = NULL;
3809 	int err = 0;
3810 
3811 	if (!amdgpu_sriov_vf(adev)) {
3812 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3813 		return -EINVAL;
3814 	}
3815 
3816 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
3817 				   "amdgpu/%s_cap.bin", chip_name);
3818 	if (err) {
3819 		if (err == -ENODEV) {
3820 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3821 			err = 0;
3822 		} else {
3823 			dev_err(adev->dev, "failed to initialize cap microcode\n");
3824 		}
3825 		goto out;
3826 	}
3827 
3828 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3829 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
3830 	info->fw = adev->psp.cap_fw;
3831 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3832 		adev->psp.cap_fw->data;
3833 	adev->firmware.fw_size += ALIGN(
3834 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3835 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3836 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3837 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3838 
3839 	return 0;
3840 
3841 out:
3842 	amdgpu_ucode_release(&adev->psp.cap_fw);
3843 	return err;
3844 }
3845 
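/*
 * Ask the PSP to apply SQ perfmon override settings for one XCP. Only
 * implemented for MP0 13.0.6 on bare metal; skipped under SR-IOV.
 */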
3846 int psp_config_sq_perfmon(struct psp_context *psp,
3847 		uint32_t xcp_id, bool core_override_enable,
3848 		bool reg_override_enable, bool perfmon_override_enable)
3849 {
3850 	int ret;
	struct psp_gfx_cmd_resp *cmd;
3851 
3852 	if (amdgpu_sriov_vf(psp->adev))
3853 		return 0;
3854 
3855 	if (xcp_id > MAX_XCP) {
3856 		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
3857 		return -EINVAL;
3858 	}
3859 
3860 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
3861 		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
3862 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
3863 		return -EINVAL;
3864 	}
3865 	cmd = acquire_psp_cmd_buf(psp);
3866 
3867 	cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
3868 	cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
3869 	cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
3870 	cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
3871 	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
3872 
3873 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
3874 	if (ret)
3875 		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
3876 			xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
3877 
3878 	release_psp_cmd_buf(psp);
3879 	return ret;
3880 }
3881 
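/* Nothing to do for PSP clock or power gating; these callbacks are stubs. */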
3882 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3883 					enum amd_clockgating_state state)
3884 {
3885 	return 0;
3886 }
3887 
3888 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
3889 				     enum amd_powergating_state state)
3890 {
3891 	return 0;
3892 }
3893 
3894 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3895 					 struct device_attribute *attr,
3896 					 char *buf)
3897 {
3898 	struct drm_device *ddev = dev_get_drvdata(dev);
3899 	struct amdgpu_device *adev = drm_to_adev(ddev);
3900 	struct amdgpu_ip_block *ip_block;
3901 	uint32_t fw_ver;
3902 	int ret;
3903 
3904 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
3905 	if (!ip_block || !ip_block->status.late_initialized) {
3906 		dev_info(adev->dev, "PSP block is not ready yet.\n");
3907 		return -EBUSY;
3908 	}
3909 
3910 	mutex_lock(&adev->psp.mutex);
3911 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3912 	mutex_unlock(&adev->psp.mutex);
3913 
3914 	if (ret) {
3915 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3916 		return ret;
3917 	}
3918 
3919 	return sysfs_emit(buf, "%x\n", fw_ver);
3920 }
3921 
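/*
 * Writing a firmware file name (looked up under the amdgpu/ firmware
 * directory) stages a USB-C PD firmware update: the file is copied into a
 * 1 MB aligned local frame buffer allocation and the PSP is asked to load
 * it from there.
 */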
3922 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3923 					  struct device_attribute *attr,
3924 					  const char *buf,
3925 					  size_t count)
3926 {
3927 	struct drm_device *ddev = dev_get_drvdata(dev);
3928 	struct amdgpu_device *adev = drm_to_adev(ddev);
3929 	int ret, idx;
3930 	const struct firmware *usbc_pd_fw;
3931 	struct amdgpu_bo *fw_buf_bo = NULL;
3932 	uint64_t fw_pri_mc_addr;
3933 	void *fw_pri_cpu_addr;
3934 	struct amdgpu_ip_block *ip_block;
3935 
3936 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
3937 	if (!ip_block || !ip_block->status.late_initialized) {
3938 		dev_err(adev->dev, "PSP block is not ready yet.\n");
3939 		return -EBUSY;
3940 	}
3941 
3942 	if (!drm_dev_enter(ddev, &idx))
3943 		return -ENODEV;
3944 
3945 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
3946 				   "amdgpu/%s", buf);
3947 	if (ret)
3948 		goto fail;
3949 
3950 	/* LFB address which is aligned to 1MB boundary per PSP request */
3951 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3952 				      AMDGPU_GEM_DOMAIN_VRAM |
3953 				      AMDGPU_GEM_DOMAIN_GTT,
3954 				      &fw_buf_bo, &fw_pri_mc_addr,
3955 				      &fw_pri_cpu_addr);
3956 	if (ret)
3957 		goto rel_buf;
3958 
3959 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3960 
3961 	mutex_lock(&adev->psp.mutex);
3962 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3963 	mutex_unlock(&adev->psp.mutex);
3964 
3965 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3966 
3967 rel_buf:
3968 	amdgpu_ucode_release(&usbc_pd_fw);
3969 fail:
3970 	if (ret) {
3971 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
3972 		count = ret;
3973 	}
3974 
3975 	drm_dev_exit(idx);
3976 	return count;
3977 }
3978 
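/*
 * Copy a firmware image into the PSP private (fw_pri) buffer, guarding
 * against a concurrently unplugged device with drm_dev_enter().
 */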
3979 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3980 {
3981 	int idx;
3982 
3983 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3984 		return;
3985 
3986 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3987 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
3988 
3989 	drm_dev_exit(idx);
3990 }
3991 
3992 /**
3993  * DOC: usbc_pd_fw
3994  * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3995  * this file will trigger the update process.
3996  */
3997 static DEVICE_ATTR(usbc_pd_fw, 0644,
3998 		   psp_usbc_pd_fw_sysfs_read,
3999 		   psp_usbc_pd_fw_sysfs_write);
4000 
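/* A parsed PSP firmware blob is considered valid if its size is non-zero. */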
4001 int is_psp_fw_valid(struct psp_bin_desc bin)
4002 {
4003 	return bin.size_bytes;
4004 }
4005 
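/*
 * Accumulate the written IFWI image chunks into a temporary kernel buffer;
 * the actual flash is kicked off by reading psp_vbflash afterwards.
 */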
4006 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4007 					struct bin_attribute *bin_attr,
4008 					char *buffer, loff_t pos, size_t count)
4009 {
4010 	struct device *dev = kobj_to_dev(kobj);
4011 	struct drm_device *ddev = dev_get_drvdata(dev);
4012 	struct amdgpu_device *adev = drm_to_adev(ddev);
4013 
4014 	adev->psp.vbflash_done = false;
4015 
4016 	/* Safeguard against memory drain */
4017 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
4018 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4019 		kvfree(adev->psp.vbflash_tmp_buf);
4020 		adev->psp.vbflash_tmp_buf = NULL;
4021 		adev->psp.vbflash_image_size = 0;
4022 		return -ENOMEM;
4023 	}
4024 
4025 	/* TODO Just allocate max for now and optimize to realloc later if needed */
4026 	if (!adev->psp.vbflash_tmp_buf) {
4027 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4028 		if (!adev->psp.vbflash_tmp_buf)
4029 			return -ENOMEM;
4030 	}
4031 
4032 	mutex_lock(&adev->psp.mutex);
4033 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4034 	adev->psp.vbflash_image_size += count;
4035 	mutex_unlock(&adev->psp.mutex);
4036 
4037 	dev_dbg(adev->dev, "IFWI staged for update\n");
4038 
4039 	return count;
4040 }
4041 
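/*
 * Copy the staged IFWI image into a GPU-accessible buffer and ask the PSP
 * to update the SPIROM with it. The staging buffer is freed either way.
 */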
4042 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4043 				       struct bin_attribute *bin_attr, char *buffer,
4044 				       loff_t pos, size_t count)
4045 {
4046 	struct device *dev = kobj_to_dev(kobj);
4047 	struct drm_device *ddev = dev_get_drvdata(dev);
4048 	struct amdgpu_device *adev = drm_to_adev(ddev);
4049 	struct amdgpu_bo *fw_buf_bo = NULL;
4050 	uint64_t fw_pri_mc_addr;
4051 	void *fw_pri_cpu_addr;
4052 	int ret;
4053 
4054 	if (adev->psp.vbflash_image_size == 0)
4055 		return -EINVAL;
4056 
4057 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4058 
4059 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4060 					AMDGPU_GPU_PAGE_SIZE,
4061 					AMDGPU_GEM_DOMAIN_VRAM,
4062 					&fw_buf_bo,
4063 					&fw_pri_mc_addr,
4064 					&fw_pri_cpu_addr);
4065 	if (ret)
4066 		goto rel_buf;
4067 
4068 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4069 
4070 	mutex_lock(&adev->psp.mutex);
4071 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4072 	mutex_unlock(&adev->psp.mutex);
4073 
4074 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4075 
4076 rel_buf:
4077 	kvfree(adev->psp.vbflash_tmp_buf);
4078 	adev->psp.vbflash_tmp_buf = NULL;
4079 	adev->psp.vbflash_image_size = 0;
4080 
4081 	if (ret) {
4082 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4083 		return ret;
4084 	}
4085 
4086 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4087 	return 0;
4088 }
4089 
4090 /**
4091  * DOC: psp_vbflash
4092  * Writing to this file will stage an IFWI for update. Reading from this file
4093  * will trigger the update process.
4094  */
4095 static struct bin_attribute psp_vbflash_bin_attr = {
4096 	.attr = {.name = "psp_vbflash", .mode = 0660},
4097 	.size = 0,
4098 	.write = amdgpu_psp_vbflash_write,
4099 	.read = amdgpu_psp_vbflash_read,
4100 };
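/*
 * Illustrative flashing sequence from user space (the exact sysfs path
 * depends on the GPU's PCI address):
 *
 *   cp <ifwi_image> /sys/bus/pci/devices/<bdf>/psp_vbflash
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash_status
 */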
4101 
4102 /**
4103  * DOC: psp_vbflash_status
4104  * The status of the flash process.
4105  * 0: IFWI flash not complete.
4106  * 1: IFWI flash complete.
4107  */
4108 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4109 					 struct device_attribute *attr,
4110 					 char *buf)
4111 {
4112 	struct drm_device *ddev = dev_get_drvdata(dev);
4113 	struct amdgpu_device *adev = drm_to_adev(ddev);
4114 	uint32_t vbflash_status;
4115 
4116 	vbflash_status = psp_vbflash_status(&adev->psp);
4117 	if (!adev->psp.vbflash_done)
4118 		vbflash_status = 0;
4119 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4120 		vbflash_status = 1;
4121 
4122 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
4123 }
4124 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4125 
4126 static struct bin_attribute *bin_flash_attrs[] = {
4127 	&psp_vbflash_bin_attr,
4128 	NULL
4129 };
4130 
4131 static struct attribute *flash_attrs[] = {
4132 	&dev_attr_psp_vbflash_status.attr,
4133 	&dev_attr_usbc_pd_fw.attr,
4134 	NULL
4135 };
4136 
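/*
 * Only expose the flash interfaces the PSP actually supports: usbc_pd_fw
 * requires PD firmware update support, the IFWI attributes require IFWI
 * update support.
 */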
4137 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4138 {
4139 	struct device *dev = kobj_to_dev(kobj);
4140 	struct drm_device *ddev = dev_get_drvdata(dev);
4141 	struct amdgpu_device *adev = drm_to_adev(ddev);
4142 
4143 	if (attr == &dev_attr_usbc_pd_fw.attr)
4144 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
4145 
4146 	return adev->psp.sup_ifwi_up ? 0440 : 0;
4147 }
4148 
4149 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4150 						const struct bin_attribute *attr,
4151 						int idx)
4152 {
4153 	struct device *dev = kobj_to_dev(kobj);
4154 	struct drm_device *ddev = dev_get_drvdata(dev);
4155 	struct amdgpu_device *adev = drm_to_adev(ddev);
4156 
4157 	return adev->psp.sup_ifwi_up ? 0660 : 0;
4158 }
4159 
4160 const struct attribute_group amdgpu_flash_attr_group = {
4161 	.attrs = flash_attrs,
4162 	.bin_attrs = bin_flash_attrs,
4163 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4164 	.is_visible = amdgpu_flash_attr_is_visible,
4165 };
4166 
4167 const struct amd_ip_funcs psp_ip_funcs = {
4168 	.name = "psp",
4169 	.early_init = psp_early_init,
4170 	.sw_init = psp_sw_init,
4171 	.sw_fini = psp_sw_fini,
4172 	.hw_init = psp_hw_init,
4173 	.hw_fini = psp_hw_fini,
4174 	.suspend = psp_suspend,
4175 	.resume = psp_resume,
4176 	.set_clockgating_state = psp_set_clockgating_state,
4177 	.set_powergating_state = psp_set_powergating_state,
4178 };
4179 
4180 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4181 	.type = AMD_IP_BLOCK_TYPE_PSP,
4182 	.major = 3,
4183 	.minor = 1,
4184 	.rev = 0,
4185 	.funcs = &psp_ip_funcs,
4186 };
4187 
4188 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4189 	.type = AMD_IP_BLOCK_TYPE_PSP,
4190 	.major = 10,
4191 	.minor = 0,
4192 	.rev = 0,
4193 	.funcs = &psp_ip_funcs,
4194 };
4195 
4196 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4197 	.type = AMD_IP_BLOCK_TYPE_PSP,
4198 	.major = 11,
4199 	.minor = 0,
4200 	.rev = 0,
4201 	.funcs = &psp_ip_funcs,
4202 };
4203 
4204 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4205 	.type = AMD_IP_BLOCK_TYPE_PSP,
4206 	.major = 11,
4207 	.minor = 0,
4208 	.rev = 8,
4209 	.funcs = &psp_ip_funcs,
4210 };
4211 
4212 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4213 	.type = AMD_IP_BLOCK_TYPE_PSP,
4214 	.major = 12,
4215 	.minor = 0,
4216 	.rev = 0,
4217 	.funcs = &psp_ip_funcs,
4218 };
4219 
4220 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4221 	.type = AMD_IP_BLOCK_TYPE_PSP,
4222 	.major = 13,
4223 	.minor = 0,
4224 	.rev = 0,
4225 	.funcs = &psp_ip_funcs,
4226 };
4227 
4228 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4229 	.type = AMD_IP_BLOCK_TYPE_PSP,
4230 	.major = 13,
4231 	.minor = 0,
4232 	.rev = 4,
4233 	.funcs = &psp_ip_funcs,
4234 };
4235 
4236 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4237 	.type = AMD_IP_BLOCK_TYPE_PSP,
4238 	.major = 14,
4239 	.minor = 0,
4240 	.rev = 0,
4241 	.funcs = &psp_ip_funcs,
4242 };
4243