xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision 9ab737f3aeea29129903de6ddebf4bbce3ec0644)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46 
47 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)
48 
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52 
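/* Create the KM ring used to submit commands to the PSP: allocate a 4k ring
 * buffer in VRAM (or GTT) and record its MC address and CPU pointer.
 */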
53 static int psp_ring_init(struct psp_context *psp,
54 			 enum psp_ring_type ring_type)
55 {
56 	int ret = 0;
57 	struct psp_ring *ring;
58 	struct amdgpu_device *adev = psp->adev;
59 
60 	ring = &psp->km_ring;
61 
62 	ring->ring_type = ring_type;
63 
64 	/* allocate a 4k page of Local Frame Buffer memory for the ring */
65 	ring->ring_size = 0x1000;
66 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 				      AMDGPU_GEM_DOMAIN_VRAM |
68 				      AMDGPU_GEM_DOMAIN_GTT,
69 				      &adev->firmware.rbuf,
70 				      &ring->ring_mem_mc_addr,
71 				      (void **)&ring->ring_mem);
72 	if (ret) {
73 		ring->ring_size = 0;
74 		return ret;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * With DF Cstate management centralized in the PMFW, the firmware
82  * loading sequence is updated as below:
83  *   - Load KDB
84  *   - Load SYS_DRV
85  *   - Load tOS
86  *   - Load PMFW
87  *   - Setup TMR
88  *   - Load other non-psp fw
89  *   - Load ASD
90  *   - Load XGMI/RAS/HDCP/DTM TA if any
91  *
92  * This new sequence is required for
93  *   - Arcturus and onwards
94  */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 	struct amdgpu_device *adev = psp->adev;
98 
99 	if (amdgpu_sriov_vf(adev)) {
100 		psp->pmfw_centralized_cstate_management = false;
101 		return;
102 	}
103 
104 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 	case IP_VERSION(11, 0, 0):
106 	case IP_VERSION(11, 0, 4):
107 	case IP_VERSION(11, 0, 5):
108 	case IP_VERSION(11, 0, 7):
109 	case IP_VERSION(11, 0, 9):
110 	case IP_VERSION(11, 0, 11):
111 	case IP_VERSION(11, 0, 12):
112 	case IP_VERSION(11, 0, 13):
113 	case IP_VERSION(13, 0, 0):
114 	case IP_VERSION(13, 0, 2):
115 	case IP_VERSION(13, 0, 7):
116 		psp->pmfw_centralized_cstate_management = true;
117 		break;
118 	default:
119 		psp->pmfw_centralized_cstate_management = false;
120 		break;
121 	}
122 }
123 
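/* For SRIOV VFs, request the CAP/TA microcode and, where applicable, set the
 * autoload ucode id, based on the MP0 (PSP) IP version.
 */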
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 	struct amdgpu_device *adev = psp->adev;
127 	char ucode_prefix[30];
128 	int ret = 0;
129 
130 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131 
132 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 	case IP_VERSION(9, 0, 0):
134 	case IP_VERSION(11, 0, 7):
135 	case IP_VERSION(11, 0, 9):
136 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 		break;
139 	case IP_VERSION(13, 0, 2):
140 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 		ret = psp_init_cap_microcode(psp, ucode_prefix);
142 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 		break;
144 	case IP_VERSION(13, 0, 0):
145 		adev->virt.autoload_ucode_id = 0;
146 		break;
147 	case IP_VERSION(13, 0, 6):
148 	case IP_VERSION(13, 0, 14):
149 		ret = psp_init_cap_microcode(psp, ucode_prefix);
150 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 		break;
152 	case IP_VERSION(13, 0, 10):
153 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 		ret = psp_init_cap_microcode(psp, ucode_prefix);
155 		break;
156 	default:
157 		return -EINVAL;
158 	}
159 	return ret;
160 }
161 
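/* Bind the IP-version specific PSP callbacks, set the autoload/boot-time-TMR
 * capabilities and kick off microcode initialization (SRIOV or bare metal).
 */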
162 static int psp_early_init(struct amdgpu_ip_block *ip_block)
163 {
164 	struct amdgpu_device *adev = ip_block->adev;
165 	struct psp_context *psp = &adev->psp;
166 
167 	psp->autoload_supported = true;
168 	psp->boot_time_tmr = true;
169 
170 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
171 	case IP_VERSION(9, 0, 0):
172 		psp_v3_1_set_psp_funcs(psp);
173 		psp->autoload_supported = false;
174 		psp->boot_time_tmr = false;
175 		break;
176 	case IP_VERSION(10, 0, 0):
177 	case IP_VERSION(10, 0, 1):
178 		psp_v10_0_set_psp_funcs(psp);
179 		psp->autoload_supported = false;
180 		psp->boot_time_tmr = false;
181 		break;
182 	case IP_VERSION(11, 0, 2):
183 	case IP_VERSION(11, 0, 4):
184 		psp_v11_0_set_psp_funcs(psp);
185 		psp->autoload_supported = false;
186 		psp->boot_time_tmr = false;
187 		break;
188 	case IP_VERSION(11, 0, 0):
189 	case IP_VERSION(11, 0, 7):
190 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
191 		fallthrough;
192 	case IP_VERSION(11, 0, 5):
193 	case IP_VERSION(11, 0, 9):
194 	case IP_VERSION(11, 0, 11):
195 	case IP_VERSION(11, 5, 0):
196 	case IP_VERSION(11, 0, 12):
197 	case IP_VERSION(11, 0, 13):
198 		psp_v11_0_set_psp_funcs(psp);
199 		psp->boot_time_tmr = false;
200 		break;
201 	case IP_VERSION(11, 0, 3):
202 	case IP_VERSION(12, 0, 1):
203 		psp_v12_0_set_psp_funcs(psp);
204 		psp->autoload_supported = false;
205 		psp->boot_time_tmr = false;
206 		break;
207 	case IP_VERSION(13, 0, 2):
208 		psp->boot_time_tmr = false;
209 		fallthrough;
210 	case IP_VERSION(13, 0, 6):
211 	case IP_VERSION(13, 0, 14):
212 		psp_v13_0_set_psp_funcs(psp);
213 		psp->autoload_supported = false;
214 		break;
215 	case IP_VERSION(13, 0, 12):
216 		psp_v13_0_set_psp_funcs(psp);
217 		psp->autoload_supported = false;
218 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
219 		break;
220 	case IP_VERSION(13, 0, 1):
221 	case IP_VERSION(13, 0, 3):
222 	case IP_VERSION(13, 0, 5):
223 	case IP_VERSION(13, 0, 8):
224 	case IP_VERSION(13, 0, 11):
225 	case IP_VERSION(14, 0, 0):
226 	case IP_VERSION(14, 0, 1):
227 	case IP_VERSION(14, 0, 4):
228 		psp_v13_0_set_psp_funcs(psp);
229 		psp->boot_time_tmr = false;
230 		break;
231 	case IP_VERSION(11, 0, 8):
232 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
233 			psp_v11_0_8_set_psp_funcs(psp);
234 		}
235 		psp->autoload_supported = false;
236 		psp->boot_time_tmr = false;
237 		break;
238 	case IP_VERSION(13, 0, 0):
239 	case IP_VERSION(13, 0, 7):
240 	case IP_VERSION(13, 0, 10):
241 		psp_v13_0_set_psp_funcs(psp);
242 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
243 		psp->boot_time_tmr = false;
244 		break;
245 	case IP_VERSION(13, 0, 4):
246 		psp_v13_0_4_set_psp_funcs(psp);
247 		psp->boot_time_tmr = false;
248 		break;
249 	case IP_VERSION(14, 0, 2):
250 	case IP_VERSION(14, 0, 3):
251 		psp_v14_0_set_psp_funcs(psp);
252 		break;
253 	case IP_VERSION(14, 0, 5):
254 		psp_v14_0_set_psp_funcs(psp);
255 		psp->boot_time_tmr = false;
256 		break;
257 	default:
258 		return -EINVAL;
259 	}
260 
261 	psp->adev = adev;
262 
263 	adev->psp_timeout = 20000;
264 
265 	psp_check_pmfw_centralized_cstate_management(psp);
266 
267 	if (amdgpu_sriov_vf(adev))
268 		return psp_init_sriov_microcode(psp);
269 	else
270 		return psp_init_microcode(psp);
271 }
272 
273 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
274 {
275 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
276 			      &mem_ctx->shared_buf);
277 	mem_ctx->shared_bo = NULL;
278 }
279 
280 static void psp_free_shared_bufs(struct psp_context *psp)
281 {
282 	void *tmr_buf;
283 	void **pptr;
284 
285 	/* free TMR memory buffer */
286 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
287 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
288 	psp->tmr_bo = NULL;
289 
290 	/* free xgmi shared memory */
291 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
292 
293 	/* free ras shared memory */
294 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
295 
296 	/* free hdcp shared memory */
297 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
298 
299 	/* free dtm shared memory */
300 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
301 
302 	/* free rap shared memory */
303 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
304 
305 	/* free securedisplay shared memory */
306 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
307 
308 
309 }
310 
311 static void psp_memory_training_fini(struct psp_context *psp)
312 {
313 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
314 
315 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
316 	kfree(ctx->sys_cache);
317 	ctx->sys_cache = NULL;
318 }
319 
320 static int psp_memory_training_init(struct psp_context *psp)
321 {
322 	int ret;
323 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
324 
325 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
326 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
327 		return 0;
328 	}
329 
330 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
331 	if (ctx->sys_cache == NULL) {
332 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
333 		ret = -ENOMEM;
334 		goto Err_out;
335 	}
336 
337 	dev_dbg(psp->adev->dev,
338 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
339 		ctx->train_data_size,
340 		ctx->p2c_train_data_offset,
341 		ctx->c2p_train_data_offset);
342 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
343 	return 0;
344 
345 Err_out:
346 	psp_memory_training_fini(psp);
347 	return ret;
348 }
349 
350 /*
351  * Helper function to query psp runtime database entry
352  *
353  * @adev: amdgpu_device pointer
354  * @entry_type: the type of psp runtime database entry
355  * @db_entry: runtime database entry pointer
356  *
357  * Return false if the runtime database doesn't exist or the entry is invalid,
358  * or true if the specific database entry is found and copied to @db_entry
359  */
360 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
361 				     enum psp_runtime_entry_type entry_type,
362 				     void *db_entry)
363 {
364 	uint64_t db_header_pos, db_dir_pos;
365 	struct psp_runtime_data_header db_header = {0};
366 	struct psp_runtime_data_directory db_dir = {0};
367 	bool ret = false;
368 	int i;
369 
370 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
371 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
372 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
373 		return false;
374 
375 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
376 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
377 
378 	/* read runtime db header from vram */
379 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
380 			sizeof(struct psp_runtime_data_header), false);
381 
382 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
383 		/* runtime db doesn't exist, exit */
384 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
385 		return false;
386 	}
387 
388 	/* read runtime database entry from vram */
389 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
390 			sizeof(struct psp_runtime_data_directory), false);
391 
392 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
393 		/* invalid db entry count, exit */
394 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
395 		return false;
396 	}
397 
398 	/* look up for requested entry type */
399 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
400 		if (db_dir.entry_list[i].entry_type == entry_type) {
401 			switch (entry_type) {
402 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
403 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
404 					/* invalid db entry size */
405 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
406 					return false;
407 				}
408 				/* read runtime database entry */
409 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
410 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
411 				ret = true;
412 				break;
413 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
414 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
415 					/* invalid db entry size */
416 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
417 					return false;
418 				}
419 				/* read runtime database entry */
420 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
421 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
422 				ret = true;
423 				break;
424 			default:
425 				ret = false;
426 				break;
427 			}
428 		}
429 	}
430 
431 	return ret;
432 }
433 
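/* Software init: allocate the command buffer, read the SCPM and boot-config
 * entries from the PSP runtime database, run two-stage memory training when
 * enabled, and create the firmware private, fence and command buffer objects.
 */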
434 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
435 {
436 	struct amdgpu_device *adev = ip_block->adev;
437 	struct psp_context *psp = &adev->psp;
438 	int ret;
439 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
440 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
441 	struct psp_runtime_scpm_entry scpm_entry;
442 
443 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
444 	if (!psp->cmd) {
445 		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
446 		return -ENOMEM;
447 	}
448 
449 	adev->psp.xgmi_context.supports_extended_data =
450 		!adev->gmc.xgmi.connected_to_cpu &&
451 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
452 
453 	memset(&scpm_entry, 0, sizeof(scpm_entry));
454 	if ((psp_get_runtime_db_entry(adev,
455 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
456 				&scpm_entry)) &&
457 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
458 		adev->scpm_enabled = true;
459 		adev->scpm_status = scpm_entry.scpm_status;
460 	} else {
461 		adev->scpm_enabled = false;
462 		adev->scpm_status = SCPM_DISABLE;
463 	}
464 
465 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
466 
467 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
468 	if (psp_get_runtime_db_entry(adev,
469 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
470 				&boot_cfg_entry)) {
471 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
472 		if ((psp->boot_cfg_bitmask) &
473 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
474 			/* If psp runtime database exists, then
475 			 * only enable two stage memory training
476 			 * when TWO_STAGE_DRAM_TRAINING bit is set
477 			 * in runtime database
478 			 */
479 			mem_training_ctx->enable_mem_training = true;
480 		}
481 
482 	} else {
483 		/* If psp runtime database doesn't exist or is
484 		 * invalid, force enable two stage memory training
485 		 */
486 		mem_training_ctx->enable_mem_training = true;
487 	}
488 
489 	if (mem_training_ctx->enable_mem_training) {
490 		ret = psp_memory_training_init(psp);
491 		if (ret) {
492 			dev_err(adev->dev, "Failed to initialize memory training!\n");
493 			return ret;
494 		}
495 
496 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
497 		if (ret) {
498 			dev_err(adev->dev, "Failed to process memory training!\n");
499 			return ret;
500 		}
501 	}
502 
503 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
504 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
505 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
506 				      &psp->fw_pri_bo,
507 				      &psp->fw_pri_mc_addr,
508 				      &psp->fw_pri_buf);
509 	if (ret)
510 		return ret;
511 
512 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
513 				      AMDGPU_GEM_DOMAIN_VRAM |
514 				      AMDGPU_GEM_DOMAIN_GTT,
515 				      &psp->fence_buf_bo,
516 				      &psp->fence_buf_mc_addr,
517 				      &psp->fence_buf);
518 	if (ret)
519 		goto failed1;
520 
521 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
522 				      AMDGPU_GEM_DOMAIN_VRAM |
523 				      AMDGPU_GEM_DOMAIN_GTT,
524 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
525 				      (void **)&psp->cmd_buf_mem);
526 	if (ret)
527 		goto failed2;
528 
529 	return 0;
530 
531 failed2:
532 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
533 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
534 failed1:
535 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
536 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
537 	return ret;
538 }
539 
540 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
541 {
542 	struct amdgpu_device *adev = ip_block->adev;
543 	struct psp_context *psp = &adev->psp;
544 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
545 
546 	psp_memory_training_fini(psp);
547 
548 	amdgpu_ucode_release(&psp->sos_fw);
549 	amdgpu_ucode_release(&psp->asd_fw);
550 	amdgpu_ucode_release(&psp->ta_fw);
551 	amdgpu_ucode_release(&psp->cap_fw);
552 	amdgpu_ucode_release(&psp->toc_fw);
553 
554 	kfree(cmd);
555 	cmd = NULL;
556 
557 	psp_free_shared_bufs(psp);
558 
559 	if (psp->km_ring.ring_mem)
560 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
561 				      &psp->km_ring.ring_mem_mc_addr,
562 				      (void **)&psp->km_ring.ring_mem);
563 
564 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
565 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
566 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
567 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
568 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
569 			      (void **)&psp->cmd_buf_mem);
570 
571 	return 0;
572 }
573 
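/* Poll a register with 1 us granularity for up to adev->usec_timeout
 * iterations, returning 0 once it changes away from reg_val (check_changed)
 * or the masked value equals reg_val, and -ETIME on timeout.
 */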
574 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
575 		 uint32_t reg_val, uint32_t mask, bool check_changed)
576 {
577 	uint32_t val;
578 	int i;
579 	struct amdgpu_device *adev = psp->adev;
580 
581 	if (psp->adev->no_hw_access)
582 		return 0;
583 
584 	for (i = 0; i < adev->usec_timeout; i++) {
585 		val = RREG32(reg_index);
586 		if (check_changed) {
587 			if (val != reg_val)
588 				return 0;
589 		} else {
590 			if ((val & mask) == reg_val)
591 				return 0;
592 		}
593 		udelay(1);
594 	}
595 
596 	return -ETIME;
597 }
598 
599 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
600 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
601 {
602 	uint32_t val;
603 	int i;
604 	struct amdgpu_device *adev = psp->adev;
605 
606 	if (psp->adev->no_hw_access)
607 		return 0;
608 
609 	for (i = 0; i < msec_timeout; i++) {
610 		val = RREG32(reg_index);
611 		if ((val & mask) == reg_val)
612 			return 0;
613 		msleep(1);
614 	}
615 
616 	return -ETIME;
617 }
618 
619 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
620 {
621 	switch (cmd_id) {
622 	case GFX_CMD_ID_LOAD_TA:
623 		return "LOAD_TA";
624 	case GFX_CMD_ID_UNLOAD_TA:
625 		return "UNLOAD_TA";
626 	case GFX_CMD_ID_INVOKE_CMD:
627 		return "INVOKE_CMD";
628 	case GFX_CMD_ID_LOAD_ASD:
629 		return "LOAD_ASD";
630 	case GFX_CMD_ID_SETUP_TMR:
631 		return "SETUP_TMR";
632 	case GFX_CMD_ID_LOAD_IP_FW:
633 		return "LOAD_IP_FW";
634 	case GFX_CMD_ID_DESTROY_TMR:
635 		return "DESTROY_TMR";
636 	case GFX_CMD_ID_SAVE_RESTORE:
637 		return "SAVE_RESTORE_IP_FW";
638 	case GFX_CMD_ID_SETUP_VMR:
639 		return "SETUP_VMR";
640 	case GFX_CMD_ID_DESTROY_VMR:
641 		return "DESTROY_VMR";
642 	case GFX_CMD_ID_PROG_REG:
643 		return "PROG_REG";
644 	case GFX_CMD_ID_GET_FW_ATTESTATION:
645 		return "GET_FW_ATTESTATION";
646 	case GFX_CMD_ID_LOAD_TOC:
647 		return "ID_LOAD_TOC";
648 	case GFX_CMD_ID_AUTOLOAD_RLC:
649 		return "AUTOLOAD_RLC";
650 	case GFX_CMD_ID_BOOT_CFG:
651 		return "BOOT_CFG";
652 	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
653 		return "CONFIG_SQ_PERFMON";
654 	default:
655 		return "UNKNOWN CMD";
656 	}
657 }
658 
659 static bool psp_err_warn(struct psp_context *psp)
660 {
661 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
662 
663 	/* This response indicates reg list is already loaded */
664 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
665 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
666 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
667 	    cmd->resp.status == TEE_ERROR_CANCEL)
668 		return false;
669 
670 	return true;
671 }
672 
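/* Copy a GFX command into the shared command buffer, submit it on the KM ring
 * and poll the fence buffer for completion, handling the RAS interrupt and
 * SRIOV "unsupported command" special cases noted below.
 */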
673 static int
674 psp_cmd_submit_buf(struct psp_context *psp,
675 		   struct amdgpu_firmware_info *ucode,
676 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
677 {
678 	int ret;
679 	int index;
680 	int timeout = psp->adev->psp_timeout;
681 	bool ras_intr = false;
682 	bool skip_unsupport = false;
683 
684 	if (psp->adev->no_hw_access)
685 		return 0;
686 
687 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
688 
689 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
690 
691 	index = atomic_inc_return(&psp->fence_value);
692 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
693 	if (ret) {
694 		atomic_dec(&psp->fence_value);
695 		goto exit;
696 	}
697 
698 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
699 	while (*((unsigned int *)psp->fence_buf) != index) {
700 		if (--timeout == 0)
701 			break;
702 		/*
703 		 * Don't wait for the timeout when err_event_athub occurs, because
704 		 * the gpu reset thread has been triggered and the locked resources
705 		 * should be released for the psp resume sequence.
706 		 */
707 		ras_intr = amdgpu_ras_intr_triggered();
708 		if (ras_intr)
709 			break;
710 		usleep_range(10, 100);
711 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
712 	}
713 
714 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
715 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
716 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
717 
718 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
719 
720 	/* In some cases, the psp response status is not 0 even though there is
721 	 * no problem while the command is submitted. Some versions of PSP FW
722 	 * don't write 0 to that field.
723 	 * So here we only print a warning instead of an error during psp
724 	 * initialization to avoid breaking hw_init, and we don't return
725 	 * -EINVAL.
726 	 */
727 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
728 		if (ucode)
729 			dev_warn(psp->adev->dev,
730 				 "failed to load ucode %s(0x%X) ",
731 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
732 		if (psp_err_warn(psp))
733 			dev_warn(
734 				psp->adev->dev,
735 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
736 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
737 				psp->cmd_buf_mem->cmd_id,
738 				psp->cmd_buf_mem->resp.status);
739 		/* If any firmware (including CAP) load fails under SRIOV, it should
740 		 * return failure to stop the VF from initializing.
741 		 * Also return failure in case of timeout
742 		 */
743 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
744 			ret = -EINVAL;
745 			goto exit;
746 		}
747 	}
748 
749 	if (ucode) {
750 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
751 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
752 	}
753 
754 exit:
755 	return ret;
756 }
757 
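/* acquire_psp_cmd_buf()/release_psp_cmd_buf() serialize access to the single
 * psp->cmd scratch structure: the buffer is zeroed on acquire and psp->mutex
 * is held until release.
 */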
758 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
759 {
760 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
761 
762 	mutex_lock(&psp->mutex);
763 
764 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
765 
766 	return cmd;
767 }
768 
769 static void release_psp_cmd_buf(struct psp_context *psp)
770 {
771 	mutex_unlock(&psp->mutex);
772 }
773 
774 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
775 				 struct psp_gfx_cmd_resp *cmd,
776 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
777 {
778 	struct amdgpu_device *adev = psp->adev;
779 	uint32_t size = 0;
780 	uint64_t tmr_pa = 0;
781 
782 	if (tmr_bo) {
783 		size = amdgpu_bo_size(tmr_bo);
784 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
785 	}
786 
787 	if (amdgpu_sriov_vf(psp->adev))
788 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
789 	else
790 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
791 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
792 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
793 	cmd->cmd.cmd_setup_tmr.buf_size = size;
794 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
795 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
796 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
797 }
798 
799 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
800 				      uint64_t pri_buf_mc, uint32_t size)
801 {
802 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
803 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
804 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
805 	cmd->cmd.cmd_load_toc.toc_size = size;
806 }
807 
808 /* Issue the LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
809 static int psp_load_toc(struct psp_context *psp,
810 			uint32_t *tmr_size)
811 {
812 	int ret;
813 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
814 
815 	/* Copy toc to psp firmware private buffer */
816 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
817 
818 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
819 
820 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
821 				 psp->fence_buf_mc_addr);
822 	if (!ret)
823 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
824 
825 	release_psp_cmd_buf(psp);
826 
827 	return ret;
828 }
829 
830 /* Set up Trusted Memory Region */
831 static int psp_tmr_init(struct psp_context *psp)
832 {
833 	int ret = 0;
834 	int tmr_size;
835 	void *tmr_buf;
836 	void **pptr;
837 
838 	/*
839 	 * According to the HW engineers, the TMR address should be "naturally
840 	 * aligned", i.e. the start address should be an integer multiple of the
841 	 * TMR size.
842 	 *
843 	 * Note: this memory needs to stay reserved until the driver
844 	 * uninitializes.
844 	 */
845 	tmr_size = PSP_TMR_SIZE(psp->adev);
846 
847 	/* For ASICs that support RLC autoload, psp will parse the toc
848 	 * and calculate the total size of TMR needed
849 	 */
850 	if (!amdgpu_sriov_vf(psp->adev) &&
851 	    psp->toc.start_addr &&
852 	    psp->toc.size_bytes &&
853 	    psp->fw_pri_buf) {
854 		ret = psp_load_toc(psp, &tmr_size);
855 		if (ret) {
856 			dev_err(psp->adev->dev, "Failed to load toc\n");
857 			return ret;
858 		}
859 	}
860 
861 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
862 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
863 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
864 					      PSP_TMR_ALIGNMENT,
865 					      AMDGPU_HAS_VRAM(psp->adev) ?
866 					      AMDGPU_GEM_DOMAIN_VRAM :
867 					      AMDGPU_GEM_DOMAIN_GTT,
868 					      &psp->tmr_bo, &psp->tmr_mc_addr,
869 					      pptr);
870 	}
871 
872 	return ret;
873 }
874 
875 static bool psp_skip_tmr(struct psp_context *psp)
876 {
877 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
878 	case IP_VERSION(11, 0, 9):
879 	case IP_VERSION(11, 0, 7):
880 	case IP_VERSION(13, 0, 2):
881 	case IP_VERSION(13, 0, 6):
882 	case IP_VERSION(13, 0, 10):
883 	case IP_VERSION(13, 0, 12):
884 	case IP_VERSION(13, 0, 14):
885 		return true;
886 	default:
887 		return false;
888 	}
889 }
890 
891 static int psp_tmr_load(struct psp_context *psp)
892 {
893 	int ret;
894 	struct psp_gfx_cmd_resp *cmd;
895 
896 	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
897 	 * Already set up by host driver.
898 	 */
899 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
900 		return 0;
901 
902 	cmd = acquire_psp_cmd_buf(psp);
903 
904 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
905 	if (psp->tmr_bo)
906 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
907 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
908 
909 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
910 				 psp->fence_buf_mc_addr);
911 
912 	release_psp_cmd_buf(psp);
913 
914 	return ret;
915 }
916 
917 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
918 					struct psp_gfx_cmd_resp *cmd)
919 {
920 	if (amdgpu_sriov_vf(psp->adev))
921 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
922 	else
923 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
924 }
925 
926 static int psp_tmr_unload(struct psp_context *psp)
927 {
928 	int ret;
929 	struct psp_gfx_cmd_resp *cmd;
930 
931 	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
932 	 * as TMR is not loaded at all
933 	 */
934 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
935 		return 0;
936 
937 	cmd = acquire_psp_cmd_buf(psp);
938 
939 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
940 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
941 
942 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
943 				 psp->fence_buf_mc_addr);
944 
945 	release_psp_cmd_buf(psp);
946 
947 	return ret;
948 }
949 
950 static int psp_tmr_terminate(struct psp_context *psp)
951 {
952 	return psp_tmr_unload(psp);
953 }
954 
955 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
956 					uint64_t *output_ptr)
957 {
958 	int ret;
959 	struct psp_gfx_cmd_resp *cmd;
960 
961 	if (!output_ptr)
962 		return -EINVAL;
963 
964 	if (amdgpu_sriov_vf(psp->adev))
965 		return 0;
966 
967 	cmd = acquire_psp_cmd_buf(psp);
968 
969 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
970 
971 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
972 				 psp->fence_buf_mc_addr);
973 
974 	if (!ret) {
975 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
976 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
977 	}
978 
979 	release_psp_cmd_buf(psp);
980 
981 	return ret;
982 }
983 
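/* Query the PSP boot config and report whether the GECC bit is set
 * (a no-op on SRIOV VFs).
 */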
984 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
985 {
986 	struct psp_context *psp = &adev->psp;
987 	struct psp_gfx_cmd_resp *cmd;
988 	int ret;
989 
990 	if (amdgpu_sriov_vf(adev))
991 		return 0;
992 
993 	cmd = acquire_psp_cmd_buf(psp);
994 
995 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
996 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
997 
998 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
999 	if (!ret) {
1000 		*boot_cfg =
1001 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1002 	}
1003 
1004 	release_psp_cmd_buf(psp);
1005 
1006 	return ret;
1007 }
1008 
1009 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1010 {
1011 	int ret;
1012 	struct psp_context *psp = &adev->psp;
1013 	struct psp_gfx_cmd_resp *cmd;
1014 
1015 	if (amdgpu_sriov_vf(adev))
1016 		return 0;
1017 
1018 	cmd = acquire_psp_cmd_buf(psp);
1019 
1020 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1021 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1022 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1023 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1024 
1025 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1026 
1027 	release_psp_cmd_buf(psp);
1028 
1029 	return ret;
1030 }
1031 
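/* Load the register list firmware, if present, via the GFX_CMD_ID_LOAD_IP_FW
 * command using the PSP private firmware buffer.
 */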
1032 static int psp_rl_load(struct amdgpu_device *adev)
1033 {
1034 	int ret;
1035 	struct psp_context *psp = &adev->psp;
1036 	struct psp_gfx_cmd_resp *cmd;
1037 
1038 	if (!is_psp_fw_valid(psp->rl))
1039 		return 0;
1040 
1041 	cmd = acquire_psp_cmd_buf(psp);
1042 
1043 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1044 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1045 
1046 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1047 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1048 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1049 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1050 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1051 
1052 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1053 
1054 	release_psp_cmd_buf(psp);
1055 
1056 	return ret;
1057 }
1058 
1059 int psp_memory_partition(struct psp_context *psp, int mode)
1060 {
1061 	struct psp_gfx_cmd_resp *cmd;
1062 	int ret;
1063 
1064 	if (amdgpu_sriov_vf(psp->adev))
1065 		return 0;
1066 
1067 	cmd = acquire_psp_cmd_buf(psp);
1068 
1069 	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1070 	cmd->cmd.cmd_memory_part.mode = mode;
1071 
1072 	dev_info(psp->adev->dev,
1073 		 "Requesting %d memory partition change through PSP", mode);
1074 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1075 	if (ret)
1076 		dev_err(psp->adev->dev,
1077 			"PSP request failed to change to NPS%d mode\n", mode);
1078 
1079 	release_psp_cmd_buf(psp);
1080 
1081 	return ret;
1082 }
1083 
1084 int psp_spatial_partition(struct psp_context *psp, int mode)
1085 {
1086 	struct psp_gfx_cmd_resp *cmd;
1087 	int ret;
1088 
1089 	if (amdgpu_sriov_vf(psp->adev))
1090 		return 0;
1091 
1092 	cmd = acquire_psp_cmd_buf(psp);
1093 
1094 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1095 	cmd->cmd.cmd_spatial_part.mode = mode;
1096 
1097 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1098 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1099 
1100 	release_psp_cmd_buf(psp);
1101 
1102 	return ret;
1103 }
1104 
1105 static int psp_asd_initialize(struct psp_context *psp)
1106 {
1107 	int ret;
1108 
1109 	/* If the PSP version doesn't match the ASD version, asd loading will fail.
1110 	 * Add a workaround to bypass it for sriov for now.
1111 	 * TODO: add version check to make it common
1112 	 */
1113 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1114 		return 0;
1115 
1116 	/* bypass asd if display hardware is not available */
1117 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1118 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1119 		return 0;
1120 
1121 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1122 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1123 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1124 
1125 	ret = psp_ta_load(psp, &psp->asd_context);
1126 	if (!ret)
1127 		psp->asd_context.initialized = true;
1128 
1129 	return ret;
1130 }
1131 
1132 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1133 				       uint32_t session_id)
1134 {
1135 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1136 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1137 }
1138 
1139 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1140 {
1141 	int ret;
1142 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1143 
1144 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1145 
1146 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1147 
1148 	context->resp_status = cmd->resp.status;
1149 
1150 	release_psp_cmd_buf(psp);
1151 
1152 	return ret;
1153 }
1154 
1155 static int psp_asd_terminate(struct psp_context *psp)
1156 {
1157 	int ret;
1158 
1159 	if (amdgpu_sriov_vf(psp->adev))
1160 		return 0;
1161 
1162 	if (!psp->asd_context.initialized)
1163 		return 0;
1164 
1165 	ret = psp_ta_unload(psp, &psp->asd_context);
1166 	if (!ret)
1167 		psp->asd_context.initialized = false;
1168 
1169 	return ret;
1170 }
1171 
1172 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1173 		uint32_t id, uint32_t value)
1174 {
1175 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1176 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1177 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1178 }
1179 
1180 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1181 		uint32_t value)
1182 {
1183 	struct psp_gfx_cmd_resp *cmd;
1184 	int ret = 0;
1185 
1186 	if (reg >= PSP_REG_LAST)
1187 		return -EINVAL;
1188 
1189 	cmd = acquire_psp_cmd_buf(psp);
1190 
1191 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1192 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1193 	if (ret)
1194 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1195 
1196 	release_psp_cmd_buf(psp);
1197 
1198 	return ret;
1199 }
1200 
1201 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1202 				     uint64_t ta_bin_mc,
1203 				     struct ta_context *context)
1204 {
1205 	cmd->cmd_id				= context->ta_load_type;
1206 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1207 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1208 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1209 
1210 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1211 		lower_32_bits(context->mem_context.shared_mc_addr);
1212 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1213 		upper_32_bits(context->mem_context.shared_mc_addr);
1214 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1215 }
1216 
1217 int psp_ta_init_shared_buf(struct psp_context *psp,
1218 				  struct ta_mem_context *mem_ctx)
1219 {
1220 	/*
1221 	 * Allocate 16k of memory, aligned to 4k, from the Frame Buffer (local
1222 	 * physical) as the ta <-> host shared memory
1223 	 */
1224 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1225 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1226 				      AMDGPU_GEM_DOMAIN_GTT,
1227 				      &mem_ctx->shared_bo,
1228 				      &mem_ctx->shared_mc_addr,
1229 				      &mem_ctx->shared_buf);
1230 }
1231 
1232 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1233 				       uint32_t ta_cmd_id,
1234 				       uint32_t session_id)
1235 {
1236 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1237 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1238 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1239 }
1240 
1241 int psp_ta_invoke(struct psp_context *psp,
1242 		  uint32_t ta_cmd_id,
1243 		  struct ta_context *context)
1244 {
1245 	int ret;
1246 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1247 
1248 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1249 
1250 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1251 				 psp->fence_buf_mc_addr);
1252 
1253 	context->resp_status = cmd->resp.status;
1254 
1255 	release_psp_cmd_buf(psp);
1256 
1257 	return ret;
1258 }
1259 
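/* Copy a TA binary into the PSP private firmware buffer and issue the load
 * command; on success the session id returned by the PSP is recorded in the
 * TA context for later invoke/unload calls.
 */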
1260 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1261 {
1262 	int ret;
1263 	struct psp_gfx_cmd_resp *cmd;
1264 
1265 	cmd = acquire_psp_cmd_buf(psp);
1266 
1267 	psp_copy_fw(psp, context->bin_desc.start_addr,
1268 		    context->bin_desc.size_bytes);
1269 
1270 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1271 
1272 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1273 				 psp->fence_buf_mc_addr);
1274 
1275 	context->resp_status = cmd->resp.status;
1276 
1277 	if (!ret)
1278 		context->session_id = cmd->resp.session_id;
1279 
1280 	release_psp_cmd_buf(psp);
1281 
1282 	return ret;
1283 }
1284 
1285 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1286 {
1287 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1288 }
1289 
1290 int psp_xgmi_terminate(struct psp_context *psp)
1291 {
1292 	int ret;
1293 	struct amdgpu_device *adev = psp->adev;
1294 
1295 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1296 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1297 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1298 	     adev->gmc.xgmi.connected_to_cpu))
1299 		return 0;
1300 
1301 	if (!psp->xgmi_context.context.initialized)
1302 		return 0;
1303 
1304 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1305 
1306 	psp->xgmi_context.context.initialized = false;
1307 
1308 	return ret;
1309 }
1310 
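/* Optionally (re)load the XGMI TA, then send the INITIALIZE command through
 * the shared buffer and cache the TA capability flags.
 */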
1311 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1312 {
1313 	struct ta_xgmi_shared_memory *xgmi_cmd;
1314 	int ret;
1315 
1316 	if (!psp->ta_fw ||
1317 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1318 	    !psp->xgmi_context.context.bin_desc.start_addr)
1319 		return -ENOENT;
1320 
1321 	if (!load_ta)
1322 		goto invoke;
1323 
1324 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1325 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1326 
1327 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1328 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1329 		if (ret)
1330 			return ret;
1331 	}
1332 
1333 	/* Load XGMI TA */
1334 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1335 	if (!ret)
1336 		psp->xgmi_context.context.initialized = true;
1337 	else
1338 		return ret;
1339 
1340 invoke:
1341 	/* Initialize XGMI session */
1342 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1343 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1344 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1345 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1346 
1347 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1348 	/* note down the capability flag for XGMI TA */
1349 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1350 
1351 	return ret;
1352 }
1353 
1354 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1355 {
1356 	struct ta_xgmi_shared_memory *xgmi_cmd;
1357 	int ret;
1358 
1359 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1360 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1361 
1362 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1363 
1364 	/* Invoke xgmi ta to get hive id */
1365 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1366 	if (ret)
1367 		return ret;
1368 
1369 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1370 
1371 	return 0;
1372 }
1373 
1374 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1375 {
1376 	struct ta_xgmi_shared_memory *xgmi_cmd;
1377 	int ret;
1378 
1379 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1380 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1381 
1382 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1383 
1384 	/* Invoke xgmi ta to get the node id */
1385 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1386 	if (ret)
1387 		return ret;
1388 
1389 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1390 
1391 	return 0;
1392 }
1393 
1394 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1395 {
1396 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1397 			IP_VERSION(13, 0, 2) &&
1398 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1399 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1400 		       IP_VERSION(13, 0, 6);
1401 }
1402 
1403 /*
1404  * Chips that support extended topology information require the driver to
1405  * reflect topology information in the opposite direction.  This is
1406  * because the TA has already exceeded its link record limit and if the
1407  * TA holds bi-directional information, the driver would have to do
1408  * multiple fetches instead of just two.
1409  */
1410 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1411 					struct psp_xgmi_node_info node_info)
1412 {
1413 	struct amdgpu_device *mirror_adev;
1414 	struct amdgpu_hive_info *hive;
1415 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1416 	uint64_t dst_node_id = node_info.node_id;
1417 	uint8_t dst_num_hops = node_info.num_hops;
1418 	uint8_t dst_num_links = node_info.num_links;
1419 
1420 	hive = amdgpu_get_xgmi_hive(psp->adev);
1421 	if (WARN_ON(!hive))
1422 		return;
1423 
1424 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1425 		struct psp_xgmi_topology_info *mirror_top_info;
1426 		int j;
1427 
1428 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1429 			continue;
1430 
1431 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1432 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1433 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1434 				continue;
1435 
1436 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1437 			/*
1438 			 * prevent re-reflection of a 0 num_links value, since the
1439 			 * reflection criterion is based on num_hops (direct or
1440 			 * indirect).
1441 			 */
1442 			if (dst_num_links)
1443 				mirror_top_info->nodes[j].num_links = dst_num_links;
1444 
1445 			break;
1446 		}
1447 
1448 		break;
1449 	}
1450 
1451 	amdgpu_put_xgmi_hive(hive);
1452 }
1453 
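/* Fetch XGMI topology from the TA in two passes: GET_TOPOLOGY_INFO for node
 * and hop data, then GET_PEER_LINKS or GET_EXTEND_PEER_LINKS for link counts
 * (and port numbers when supported), reflecting results to peer devices where
 * required.
 */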
1454 int psp_xgmi_get_topology_info(struct psp_context *psp,
1455 			       int number_devices,
1456 			       struct psp_xgmi_topology_info *topology,
1457 			       bool get_extended_data)
1458 {
1459 	struct ta_xgmi_shared_memory *xgmi_cmd;
1460 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1461 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1462 	int i;
1463 	int ret;
1464 
1465 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1466 		return -EINVAL;
1467 
1468 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1469 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1470 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1471 
1472 	/* Fill in the shared memory with topology information as input */
1473 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1474 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1475 	topology_info_input->num_nodes = number_devices;
1476 
1477 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1478 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1479 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1480 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1481 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1482 	}
1483 
1484 	/* Invoke xgmi ta to get the topology information */
1485 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1486 	if (ret)
1487 		return ret;
1488 
1489 	/* Read the output topology information from the shared memory */
1490 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1491 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1492 	for (i = 0; i < topology->num_nodes; i++) {
1493 		/* extended data will either be 0 or equal to non-extended data */
1494 		if (topology_info_output->nodes[i].num_hops)
1495 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1496 
1497 		/* non-extended data gets everything here so no need to update */
1498 		if (!get_extended_data) {
1499 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1500 			topology->nodes[i].is_sharing_enabled =
1501 					topology_info_output->nodes[i].is_sharing_enabled;
1502 			topology->nodes[i].sdma_engine =
1503 					topology_info_output->nodes[i].sdma_engine;
1504 		}
1505 
1506 	}
1507 
1508 	/* Invoke xgmi ta again to get the link information */
1509 	if (psp_xgmi_peer_link_info_supported(psp)) {
1510 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1511 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1512 		bool requires_reflection =
1513 			(psp->xgmi_context.supports_extended_data &&
1514 			 get_extended_data) ||
1515 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1516 				IP_VERSION(13, 0, 6) ||
1517 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1518 				IP_VERSION(13, 0, 14);
1519 		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1520 				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1521 
1522 		/* populate the shared output buffer rather than the cmd input buffer
1523 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1524 		 * This is required by the xgmi ta implementation of GET_PEER_LINKS.
1525 		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
1526 		 */
1527 		if (ta_port_num_support) {
1528 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1529 
1530 			for (i = 0; i < topology->num_nodes; i++)
1531 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1532 
1533 			link_extend_info_output->num_nodes = topology->num_nodes;
1534 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1535 		} else {
1536 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1537 
1538 			for (i = 0; i < topology->num_nodes; i++)
1539 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1540 
1541 			link_info_output->num_nodes = topology->num_nodes;
1542 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1543 		}
1544 
1545 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1546 		if (ret)
1547 			return ret;
1548 
1549 		for (i = 0; i < topology->num_nodes; i++) {
1550 			uint8_t node_num_links = ta_port_num_support ?
1551 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1552 			/* accumulate num_links on extended data */
1553 			if (get_extended_data) {
1554 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1555 			} else {
1556 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1557 								topology->nodes[i].num_links : node_num_links;
1558 			}
1559 			/* populate the connected port num info if supported and available */
1560 			if (ta_port_num_support && topology->nodes[i].num_links) {
1561 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1562 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1563 			}
1564 
1565 			/* reflect the topology information for bi-directionality */
1566 			if (requires_reflection && topology->nodes[i].num_hops)
1567 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1568 		}
1569 	}
1570 
1571 	return 0;
1572 }
1573 
1574 int psp_xgmi_set_topology_info(struct psp_context *psp,
1575 			       int number_devices,
1576 			       struct psp_xgmi_topology_info *topology)
1577 {
1578 	struct ta_xgmi_shared_memory *xgmi_cmd;
1579 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1580 	int i;
1581 
1582 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1583 		return -EINVAL;
1584 
1585 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1586 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1587 
1588 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1589 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1590 	topology_info_input->num_nodes = number_devices;
1591 
1592 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1593 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1594 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1595 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1596 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1597 	}
1598 
1599 	/* Invoke xgmi ta to set topology information */
1600 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1601 }
1602 
1603 // ras begin
1604 static void psp_ras_ta_check_status(struct psp_context *psp)
1605 {
1606 	struct ta_ras_shared_memory *ras_cmd =
1607 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1608 
1609 	switch (ras_cmd->ras_status) {
1610 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1611 		dev_warn(psp->adev->dev,
1612 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1613 		break;
1614 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1615 		dev_warn(psp->adev->dev,
1616 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1617 		break;
1618 	case TA_RAS_STATUS__SUCCESS:
1619 		break;
1620 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1621 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1622 			dev_warn(psp->adev->dev,
1623 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1624 		break;
1625 	default:
1626 		dev_warn(psp->adev->dev,
1627 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1628 		break;
1629 	}
1630 }
1631 
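/* Marshal a RAS TA command into the shared buffer under ras_context.mutex,
 * invoke the TA and copy back the command-specific output (trigger-error
 * status or queried address).
 */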
1632 static int psp_ras_send_cmd(struct psp_context *psp,
1633 		enum ras_command cmd_id, void *in, void *out)
1634 {
1635 	struct ta_ras_shared_memory *ras_cmd;
1636 	uint32_t cmd = cmd_id;
1637 	int ret = 0;
1638 
1639 	if (!in)
1640 		return -EINVAL;
1641 
1642 	mutex_lock(&psp->ras_context.mutex);
1643 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1644 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1645 
1646 	switch (cmd) {
1647 	case TA_RAS_COMMAND__ENABLE_FEATURES:
1648 	case TA_RAS_COMMAND__DISABLE_FEATURES:
1649 		memcpy(&ras_cmd->ras_in_message,
1650 			in, sizeof(ras_cmd->ras_in_message));
1651 		break;
1652 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1653 		memcpy(&ras_cmd->ras_in_message.trigger_error,
1654 			in, sizeof(ras_cmd->ras_in_message.trigger_error));
1655 		break;
1656 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1657 		memcpy(&ras_cmd->ras_in_message.address,
1658 			in, sizeof(ras_cmd->ras_in_message.address));
1659 		break;
1660 	default:
1661 		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1662 		ret = -EINVAL;
1663 		goto err_out;
1664 	}
1665 
1666 	ras_cmd->cmd_id = cmd;
1667 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1668 
1669 	switch (cmd) {
1670 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1671 		if (!ret && out)
1672 			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1673 		break;
1674 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1675 		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1676 			ret = -EINVAL;
1677 		else if (out)
1678 			memcpy(out,
1679 				&ras_cmd->ras_out_message.address,
1680 				sizeof(ras_cmd->ras_out_message.address));
1681 		break;
1682 	default:
1683 		break;
1684 	}
1685 
1686 err_out:
1687 	mutex_unlock(&psp->ras_context.mutex);
1688 
1689 	return ret;
1690 }
1691 
1692 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1693 {
1694 	struct ta_ras_shared_memory *ras_cmd;
1695 	int ret;
1696 
1697 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1698 
1699 	/*
1700 	 * TODO: bypass the loading in sriov for now
1701 	 */
1702 	if (amdgpu_sriov_vf(psp->adev))
1703 		return 0;
1704 
1705 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1706 
1707 	if (amdgpu_ras_intr_triggered())
1708 		return ret;
1709 
1710 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1711 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1712 		return -EINVAL;
1713 	}
1714 
1715 	if (!ret) {
1716 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1717 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1718 
1719 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1720 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1721 			dev_warn(psp->adev->dev,
1722 				 "RAS internal register access blocked\n");
1723 
1724 		psp_ras_ta_check_status(psp);
1725 	}
1726 
1727 	return ret;
1728 }
1729 
1730 int psp_ras_enable_features(struct psp_context *psp,
1731 		union ta_ras_cmd_input *info, bool enable)
1732 {
1733 	enum ras_command cmd_id;
1734 	int ret;
1735 
1736 	if (!psp->ras_context.context.initialized || !info)
1737 		return -EINVAL;
1738 
1739 	cmd_id = enable ?
1740 		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1741 	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1742 	if (ret)
1743 		return -EINVAL;
1744 
1745 	return 0;
1746 }
1747 
1748 int psp_ras_terminate(struct psp_context *psp)
1749 {
1750 	int ret;
1751 
1752 	/*
1753 	 * TODO: bypass the terminate in sriov for now
1754 	 */
1755 	if (amdgpu_sriov_vf(psp->adev))
1756 		return 0;
1757 
1758 	if (!psp->ras_context.context.initialized)
1759 		return 0;
1760 
1761 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1762 
1763 	psp->ras_context.context.initialized = false;
1764 
1765 	mutex_destroy(&psp->ras_context.mutex);
1766 
1767 	return ret;
1768 }
1769 
1770 int psp_ras_initialize(struct psp_context *psp)
1771 {
1772 	int ret;
1773 	uint32_t boot_cfg = 0xFF;
1774 	struct amdgpu_device *adev = psp->adev;
1775 	struct ta_ras_shared_memory *ras_cmd;
1776 
1777 	/*
1778 	 * TODO: bypass the initialize in sriov for now
1779 	 */
1780 	if (amdgpu_sriov_vf(adev))
1781 		return 0;
1782 
1783 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1784 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1785 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1786 		return 0;
1787 	}
1788 
1789 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1790 		/* query GECC enablement status from boot config
1791 		 * boot_cfg: 1 = GECC is enabled, 0 = GECC is disabled
1792 		 */
1793 		ret = psp_boot_config_get(adev, &boot_cfg);
1794 		if (ret)
1795 			dev_warn(adev->dev, "PSP get boot config failed\n");
1796 
1797 		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1798 			if (!boot_cfg) {
1799 				dev_info(adev->dev, "GECC is disabled\n");
1800 			} else {
1801 				/* disable GECC in the next boot cycle if RAS is
1802 				 * disabled by the module parameters amdgpu_ras_enable
1803 				 * and/or amdgpu_ras_mask, or if the boot_config_get
1804 				 * call failed
1805 				 */
1806 				ret = psp_boot_config_set(adev, 0);
1807 				if (ret)
1808 					dev_warn(adev->dev, "PSP set boot config failed\n");
1809 				else
1810 					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1811 			}
1812 		} else {
1813 			if (boot_cfg == 1) {
1814 				dev_info(adev->dev, "GECC is enabled\n");
1815 			} else {
1816 				/* enable GECC in the next boot cycle if it is disabled
1817 				 * in the boot config, or force-enable GECC if the boot
1818 				 * configuration could not be retrieved
1819 				 */
1820 				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1821 				if (ret)
1822 					dev_warn(adev->dev, "PSP set boot config failed\n");
1823 				else
1824 					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1825 			}
1826 		}
1827 	}
1828 
1829 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1830 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1831 
1832 	if (!psp->ras_context.context.mem_context.shared_buf) {
1833 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1834 		if (ret)
1835 			return ret;
1836 	}
1837 
1838 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1839 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1840 
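	/* Pre-populate the RAS TA init flags in the shared buffer; they are
	 * consumed by the TA when it is loaded below (poison mode, dGPU mode,
	 * XCC mask, disabled channel count and NPS mode).
	 */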
1841 	if (amdgpu_ras_is_poison_mode_supported(adev))
1842 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1843 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1844 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1845 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1846 		adev->gfx.xcc_mask;
1847 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1848 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1849 		ras_cmd->ras_in_message.init_flags.nps_mode =
1850 			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1851 
1852 	ret = psp_ta_load(psp, &psp->ras_context.context);
1853 
1854 	if (!ret && !ras_cmd->ras_status) {
1855 		psp->ras_context.context.initialized = true;
1856 		mutex_init(&psp->ras_context.mutex);
1857 	} else {
1858 		if (ras_cmd->ras_status)
1859 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1860 
1861 		/* fail to load RAS TA */
1862 		psp->ras_context.context.initialized = false;
1863 	}
1864 
1865 	return ret;
1866 }
1867 
1868 int psp_ras_trigger_error(struct psp_context *psp,
1869 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1870 {
1871 	struct amdgpu_device *adev = psp->adev;
1872 	int ret;
1873 	uint32_t dev_mask;
1874 	uint32_t ras_status = 0;
1875 
1876 	if (!psp->ras_context.context.initialized || !info)
1877 		return -EINVAL;
1878 
1879 	switch (info->block_id) {
1880 	case TA_RAS_BLOCK__GFX:
1881 		dev_mask = GET_MASK(GC, instance_mask);
1882 		break;
1883 	case TA_RAS_BLOCK__SDMA:
1884 		dev_mask = GET_MASK(SDMA0, instance_mask);
1885 		break;
1886 	case TA_RAS_BLOCK__VCN:
1887 	case TA_RAS_BLOCK__JPEG:
1888 		dev_mask = GET_MASK(VCN, instance_mask);
1889 		break;
1890 	default:
1891 		dev_mask = instance_mask;
1892 		break;
1893 	}
1894 
1895 	/* reuse sub_block_index for backward compatibility */
1896 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1897 	dev_mask &= AMDGPU_RAS_INST_MASK;
1898 	info->sub_block_index |= dev_mask;
1899 
1900 	ret = psp_ras_send_cmd(psp,
1901 			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
1902 	if (ret)
1903 		return -EINVAL;
1904 
1905 	/* If err_event_athub occurs, the error injection was successful;
1906 	 * however, the return status from the TA is no longer reliable.
1907 	 */
1908 	if (amdgpu_ras_intr_triggered())
1909 		return 0;
1910 
1911 	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1912 		return -EACCES;
1913 	else if (ras_status)
1914 		return -EINVAL;
1915 
1916 	return 0;
1917 }
1918 
1919 int psp_ras_query_address(struct psp_context *psp,
1920 			  struct ta_ras_query_address_input *addr_in,
1921 			  struct ta_ras_query_address_output *addr_out)
1922 {
1923 	int ret;
1924 
1925 	if (!psp->ras_context.context.initialized ||
1926 		!addr_in || !addr_out)
1927 		return -EINVAL;
1928 
1929 	ret = psp_ras_send_cmd(psp,
1930 			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
1931 
1932 	return ret;
1933 }
1934 // ras end
1935 
1936 // HDCP start
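/*
 * The HDCP, DTM, RAP and securedisplay TA sections below follow a similar
 * pattern: skip under SR-IOV or when the optional TA binary is absent,
 * allocate the shared command buffer once, load the TA, and mark the
 * context initialized (with its mutex) only on success.
 */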
1937 static int psp_hdcp_initialize(struct psp_context *psp)
1938 {
1939 	int ret;
1940 
1941 	/*
1942 	 * TODO: bypass the initialize in sriov for now
1943 	 */
1944 	if (amdgpu_sriov_vf(psp->adev))
1945 		return 0;
1946 
1947 	/* bypass HDCP initialization if the DMU is harvested */
1948 	if (!amdgpu_device_has_display_hardware(psp->adev))
1949 		return 0;
1950 
1951 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1952 	    !psp->hdcp_context.context.bin_desc.start_addr) {
1953 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1954 		return 0;
1955 	}
1956 
1957 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1958 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1959 
1960 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
1961 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1962 		if (ret)
1963 			return ret;
1964 	}
1965 
1966 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
1967 	if (!ret) {
1968 		psp->hdcp_context.context.initialized = true;
1969 		mutex_init(&psp->hdcp_context.mutex);
1970 	}
1971 
1972 	return ret;
1973 }
1974 
1975 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1976 {
1977 	/*
1978 	 * TODO: bypass the loading in sriov for now
1979 	 */
1980 	if (amdgpu_sriov_vf(psp->adev))
1981 		return 0;
1982 
1983 	if (!psp->hdcp_context.context.initialized)
1984 		return 0;
1985 
1986 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1987 }
1988 
1989 static int psp_hdcp_terminate(struct psp_context *psp)
1990 {
1991 	int ret;
1992 
1993 	/*
1994 	 * TODO: bypass the terminate in sriov for now
1995 	 */
1996 	if (amdgpu_sriov_vf(psp->adev))
1997 		return 0;
1998 
1999 	if (!psp->hdcp_context.context.initialized)
2000 		return 0;
2001 
2002 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2003 
2004 	psp->hdcp_context.context.initialized = false;
2005 
2006 	return ret;
2007 }
2008 // HDCP end
2009 
2010 // DTM start
2011 static int psp_dtm_initialize(struct psp_context *psp)
2012 {
2013 	int ret;
2014 
2015 	/*
2016 	 * TODO: bypass the initialize in sriov for now
2017 	 */
2018 	if (amdgpu_sriov_vf(psp->adev))
2019 		return 0;
2020 
2021 	/* bypass DTM initialization if the DMU is harvested */
2022 	if (!amdgpu_device_has_display_hardware(psp->adev))
2023 		return 0;
2024 
2025 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
2026 	    !psp->dtm_context.context.bin_desc.start_addr) {
2027 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2028 		return 0;
2029 	}
2030 
2031 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2032 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2033 
2034 	if (!psp->dtm_context.context.mem_context.shared_buf) {
2035 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2036 		if (ret)
2037 			return ret;
2038 	}
2039 
2040 	ret = psp_ta_load(psp, &psp->dtm_context.context);
2041 	if (!ret) {
2042 		psp->dtm_context.context.initialized = true;
2043 		mutex_init(&psp->dtm_context.mutex);
2044 	}
2045 
2046 	return ret;
2047 }
2048 
2049 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2050 {
2051 	/*
2052 	 * TODO: bypass the loading in sriov for now
2053 	 */
2054 	if (amdgpu_sriov_vf(psp->adev))
2055 		return 0;
2056 
2057 	if (!psp->dtm_context.context.initialized)
2058 		return 0;
2059 
2060 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2061 }
2062 
2063 static int psp_dtm_terminate(struct psp_context *psp)
2064 {
2065 	int ret;
2066 
2067 	/*
2068 	 * TODO: bypass the terminate in sriov for now
2069 	 */
2070 	if (amdgpu_sriov_vf(psp->adev))
2071 		return 0;
2072 
2073 	if (!psp->dtm_context.context.initialized)
2074 		return 0;
2075 
2076 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
2077 
2078 	psp->dtm_context.context.initialized = false;
2079 
2080 	return ret;
2081 }
2082 // DTM end
2083 
2084 // RAP start
2085 static int psp_rap_initialize(struct psp_context *psp)
2086 {
2087 	int ret;
2088 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2089 
2090 	/*
2091 	 * TODO: bypass the initialize in sriov for now
2092 	 */
2093 	if (amdgpu_sriov_vf(psp->adev))
2094 		return 0;
2095 
2096 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2097 	    !psp->rap_context.context.bin_desc.start_addr) {
2098 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2099 		return 0;
2100 	}
2101 
2102 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2103 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2104 
2105 	if (!psp->rap_context.context.mem_context.shared_buf) {
2106 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2107 		if (ret)
2108 			return ret;
2109 	}
2110 
2111 	ret = psp_ta_load(psp, &psp->rap_context.context);
2112 	if (!ret) {
2113 		psp->rap_context.context.initialized = true;
2114 		mutex_init(&psp->rap_context.mutex);
2115 	} else
2116 		return ret;
2117 
2118 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2119 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2120 		psp_rap_terminate(psp);
2121 		/* free rap shared memory */
2122 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2123 
2124 		dev_warn(psp->adev->dev, "RAP TA initialization failed (%d), status %d.\n",
2125 			 ret, status);
2126 
2127 		return ret;
2128 	}
2129 
2130 	return 0;
2131 }
2132 
2133 static int psp_rap_terminate(struct psp_context *psp)
2134 {
2135 	int ret;
2136 
2137 	if (!psp->rap_context.context.initialized)
2138 		return 0;
2139 
2140 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2141 
2142 	psp->rap_context.context.initialized = false;
2143 
2144 	return ret;
2145 }
2146 
2147 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2148 {
2149 	struct ta_rap_shared_memory *rap_cmd;
2150 	int ret = 0;
2151 
2152 	if (!psp->rap_context.context.initialized)
2153 		return 0;
2154 
2155 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2156 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2157 		return -EINVAL;
2158 
2159 	mutex_lock(&psp->rap_context.mutex);
2160 
2161 	rap_cmd = (struct ta_rap_shared_memory *)
2162 		  psp->rap_context.context.mem_context.shared_buf;
2163 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2164 
2165 	rap_cmd->cmd_id = ta_cmd_id;
2166 	rap_cmd->validation_method_id = METHOD_A;
2167 
2168 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2169 	if (ret)
2170 		goto out_unlock;
2171 
2172 	if (status)
2173 		*status = rap_cmd->rap_status;
2174 
2175 out_unlock:
2176 	mutex_unlock(&psp->rap_context.mutex);
2177 
2178 	return ret;
2179 }
2180 // RAP end
2181 
2182 /* securedisplay start */
2183 static int psp_securedisplay_initialize(struct psp_context *psp)
2184 {
2185 	int ret;
2186 	struct ta_securedisplay_cmd *securedisplay_cmd;
2187 
2188 	/*
2189 	 * TODO: bypass the initialize in sriov for now
2190 	 */
2191 	if (amdgpu_sriov_vf(psp->adev))
2192 		return 0;
2193 
2194 	/* bypass securedisplay initialization if the DMU is harvested */
2195 	if (!amdgpu_device_has_display_hardware(psp->adev))
2196 		return 0;
2197 
2198 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2199 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2200 		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2201 		return 0;
2202 	}
2203 
2204 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2205 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2206 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2207 
2208 	if (!psp->securedisplay_context.context.initialized) {
2209 		ret = psp_ta_init_shared_buf(psp,
2210 					     &psp->securedisplay_context.context.mem_context);
2211 		if (ret)
2212 			return ret;
2213 	}
2214 
2215 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2216 	if (!ret) {
2217 		psp->securedisplay_context.context.initialized = true;
2218 		mutex_init(&psp->securedisplay_context.mutex);
2219 	} else
2220 		return ret;
2221 
2222 	mutex_lock(&psp->securedisplay_context.mutex);
2223 
2224 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2225 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2226 
2227 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2228 
2229 	mutex_unlock(&psp->securedisplay_context.mutex);
2230 
2231 	if (ret) {
2232 		psp_securedisplay_terminate(psp);
2233 		/* free securedisplay shared memory */
2234 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2235 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialization failed.\n");
2236 		return -EINVAL;
2237 	}
2238 
2239 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2240 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2241 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2242 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2243 		/* don't try again */
2244 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2245 	}
2246 
2247 	return 0;
2248 }
2249 
2250 static int psp_securedisplay_terminate(struct psp_context *psp)
2251 {
2252 	int ret;
2253 
2254 	/*
2255 	 * TODO: bypass the terminate in sriov for now
2256 	 */
2257 	if (amdgpu_sriov_vf(psp->adev))
2258 		return 0;
2259 
2260 	if (!psp->securedisplay_context.context.initialized)
2261 		return 0;
2262 
2263 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2264 
2265 	psp->securedisplay_context.context.initialized = false;
2266 
2267 	return ret;
2268 }
2269 
2270 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2271 {
2272 	int ret;
2273 
2274 	if (!psp->securedisplay_context.context.initialized)
2275 		return -EINVAL;
2276 
2277 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2278 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2279 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2280 		return -EINVAL;
2281 
2282 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2283 
2284 	return ret;
2285 }
2286 /* SECUREDISPLAY end */
2287 
2288 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2289 {
2290 	struct psp_context *psp = &adev->psp;
2291 	int ret = 0;
2292 
2293 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2294 		ret = psp->funcs->wait_for_bootloader(psp);
2295 
2296 	return ret;
2297 }
2298 
2299 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2300 {
2301 	if (psp->funcs &&
2302 	    psp->funcs->get_ras_capability) {
2303 		return psp->funcs->get_ras_capability(psp);
2304 	} else {
2305 		return false;
2306 	}
2307 }
2308 
2309 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2310 {
2311 	struct psp_context *psp = &adev->psp;
2312 
2313 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2314 		return false;
2315 
2316 	if (psp->funcs && psp->funcs->is_reload_needed)
2317 		return psp->funcs->is_reload_needed(psp);
2318 
2319 	return false;
2320 }
2321 
2322 static int psp_hw_start(struct psp_context *psp)
2323 {
2324 	struct amdgpu_device *adev = psp->adev;
2325 	int ret;
2326 
2327 	if (!amdgpu_sriov_vf(adev)) {
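		/* On bare metal, load the PSP bootloader components in order:
		 * KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV,
		 * IPKEYMGR_DRV, SPDM_DRV and finally SOS.  Each component is
		 * optional and only sent when its firmware image is valid and
		 * the ASIC provides the corresponding bootloader callback.
		 */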
2328 		if ((is_psp_fw_valid(psp->kdb)) &&
2329 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2330 			ret = psp_bootloader_load_kdb(psp);
2331 			if (ret) {
2332 				dev_err(adev->dev, "PSP load kdb failed!\n");
2333 				return ret;
2334 			}
2335 		}
2336 
2337 		if ((is_psp_fw_valid(psp->spl)) &&
2338 		    (psp->funcs->bootloader_load_spl != NULL)) {
2339 			ret = psp_bootloader_load_spl(psp);
2340 			if (ret) {
2341 				dev_err(adev->dev, "PSP load spl failed!\n");
2342 				return ret;
2343 			}
2344 		}
2345 
2346 		if ((is_psp_fw_valid(psp->sys)) &&
2347 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2348 			ret = psp_bootloader_load_sysdrv(psp);
2349 			if (ret) {
2350 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2351 				return ret;
2352 			}
2353 		}
2354 
2355 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2356 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2357 			ret = psp_bootloader_load_soc_drv(psp);
2358 			if (ret) {
2359 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2360 				return ret;
2361 			}
2362 		}
2363 
2364 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2365 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2366 			ret = psp_bootloader_load_intf_drv(psp);
2367 			if (ret) {
2368 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2369 				return ret;
2370 			}
2371 		}
2372 
2373 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2374 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2375 			ret = psp_bootloader_load_dbg_drv(psp);
2376 			if (ret) {
2377 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2378 				return ret;
2379 			}
2380 		}
2381 
2382 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2383 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2384 			ret = psp_bootloader_load_ras_drv(psp);
2385 			if (ret) {
2386 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2387 				return ret;
2388 			}
2389 		}
2390 
2391 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2392 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2393 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2394 			if (ret) {
2395 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2396 				return ret;
2397 			}
2398 		}
2399 
2400 		if ((is_psp_fw_valid(psp->spdm_drv)) &&
2401 		    (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2402 			ret = psp_bootloader_load_spdm_drv(psp);
2403 			if (ret) {
2404 				dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2405 				return ret;
2406 			}
2407 		}
2408 
2409 		if ((is_psp_fw_valid(psp->sos)) &&
2410 		    (psp->funcs->bootloader_load_sos != NULL)) {
2411 			ret = psp_bootloader_load_sos(psp);
2412 			if (ret) {
2413 				dev_err(adev->dev, "PSP load sos failed!\n");
2414 				return ret;
2415 			}
2416 		}
2417 	}
2418 
2419 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2420 	if (ret) {
2421 		dev_err(adev->dev, "PSP create ring failed!\n");
2422 		return ret;
2423 	}
2424 
2425 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2426 		goto skip_pin_bo;
2427 
2428 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2429 		ret = psp_tmr_init(psp);
2430 		if (ret) {
2431 			dev_err(adev->dev, "PSP tmr init failed!\n");
2432 			return ret;
2433 		}
2434 	}
2435 
2436 skip_pin_bo:
2437 	/*
2438 	 * For ASICs with DF Cstate management centralized
2439 	 * to PMFW, TMR setup should be performed after PMFW is
2440 	 * loaded and before other non-PSP firmware is loaded.
2441 	 */
2442 	if (psp->pmfw_centralized_cstate_management) {
2443 		ret = psp_load_smu_fw(psp);
2444 		if (ret)
2445 			return ret;
2446 	}
2447 
2448 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2449 		ret = psp_tmr_load(psp);
2450 		if (ret) {
2451 			dev_err(adev->dev, "PSP load tmr failed!\n");
2452 			return ret;
2453 		}
2454 	}
2455 
2456 	return 0;
2457 }
2458 
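/* Map a driver-side ucode ID to the PSP GFX firmware type carried in the
 * GFX_CMD_ID_LOAD_IP_FW command.
 */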
2459 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2460 			   enum psp_gfx_fw_type *type)
2461 {
2462 	switch (ucode->ucode_id) {
2463 	case AMDGPU_UCODE_ID_CAP:
2464 		*type = GFX_FW_TYPE_CAP;
2465 		break;
2466 	case AMDGPU_UCODE_ID_SDMA0:
2467 		*type = GFX_FW_TYPE_SDMA0;
2468 		break;
2469 	case AMDGPU_UCODE_ID_SDMA1:
2470 		*type = GFX_FW_TYPE_SDMA1;
2471 		break;
2472 	case AMDGPU_UCODE_ID_SDMA2:
2473 		*type = GFX_FW_TYPE_SDMA2;
2474 		break;
2475 	case AMDGPU_UCODE_ID_SDMA3:
2476 		*type = GFX_FW_TYPE_SDMA3;
2477 		break;
2478 	case AMDGPU_UCODE_ID_SDMA4:
2479 		*type = GFX_FW_TYPE_SDMA4;
2480 		break;
2481 	case AMDGPU_UCODE_ID_SDMA5:
2482 		*type = GFX_FW_TYPE_SDMA5;
2483 		break;
2484 	case AMDGPU_UCODE_ID_SDMA6:
2485 		*type = GFX_FW_TYPE_SDMA6;
2486 		break;
2487 	case AMDGPU_UCODE_ID_SDMA7:
2488 		*type = GFX_FW_TYPE_SDMA7;
2489 		break;
2490 	case AMDGPU_UCODE_ID_CP_MES:
2491 		*type = GFX_FW_TYPE_CP_MES;
2492 		break;
2493 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2494 		*type = GFX_FW_TYPE_MES_STACK;
2495 		break;
2496 	case AMDGPU_UCODE_ID_CP_MES1:
2497 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2498 		break;
2499 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2500 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2501 		break;
2502 	case AMDGPU_UCODE_ID_CP_CE:
2503 		*type = GFX_FW_TYPE_CP_CE;
2504 		break;
2505 	case AMDGPU_UCODE_ID_CP_PFP:
2506 		*type = GFX_FW_TYPE_CP_PFP;
2507 		break;
2508 	case AMDGPU_UCODE_ID_CP_ME:
2509 		*type = GFX_FW_TYPE_CP_ME;
2510 		break;
2511 	case AMDGPU_UCODE_ID_CP_MEC1:
2512 		*type = GFX_FW_TYPE_CP_MEC;
2513 		break;
2514 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2515 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2516 		break;
2517 	case AMDGPU_UCODE_ID_CP_MEC2:
2518 		*type = GFX_FW_TYPE_CP_MEC;
2519 		break;
2520 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2521 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2522 		break;
2523 	case AMDGPU_UCODE_ID_RLC_P:
2524 		*type = GFX_FW_TYPE_RLC_P;
2525 		break;
2526 	case AMDGPU_UCODE_ID_RLC_V:
2527 		*type = GFX_FW_TYPE_RLC_V;
2528 		break;
2529 	case AMDGPU_UCODE_ID_RLC_G:
2530 		*type = GFX_FW_TYPE_RLC_G;
2531 		break;
2532 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2533 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2534 		break;
2535 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2536 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2537 		break;
2538 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2539 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2540 		break;
2541 	case AMDGPU_UCODE_ID_RLC_IRAM:
2542 		*type = GFX_FW_TYPE_RLC_IRAM;
2543 		break;
2544 	case AMDGPU_UCODE_ID_RLC_DRAM:
2545 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2546 		break;
2547 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2548 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2549 		break;
2550 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2551 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2552 		break;
2553 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2554 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2555 		break;
2556 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2557 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2558 		break;
2559 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2560 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2561 		break;
2562 	case AMDGPU_UCODE_ID_SMC:
2563 		*type = GFX_FW_TYPE_SMU;
2564 		break;
2565 	case AMDGPU_UCODE_ID_PPTABLE:
2566 		*type = GFX_FW_TYPE_PPTABLE;
2567 		break;
2568 	case AMDGPU_UCODE_ID_UVD:
2569 		*type = GFX_FW_TYPE_UVD;
2570 		break;
2571 	case AMDGPU_UCODE_ID_UVD1:
2572 		*type = GFX_FW_TYPE_UVD1;
2573 		break;
2574 	case AMDGPU_UCODE_ID_VCE:
2575 		*type = GFX_FW_TYPE_VCE;
2576 		break;
2577 	case AMDGPU_UCODE_ID_VCN:
2578 		*type = GFX_FW_TYPE_VCN;
2579 		break;
2580 	case AMDGPU_UCODE_ID_VCN1:
2581 		*type = GFX_FW_TYPE_VCN1;
2582 		break;
2583 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2584 		*type = GFX_FW_TYPE_DMCU_ERAM;
2585 		break;
2586 	case AMDGPU_UCODE_ID_DMCU_INTV:
2587 		*type = GFX_FW_TYPE_DMCU_ISR;
2588 		break;
2589 	case AMDGPU_UCODE_ID_VCN0_RAM:
2590 		*type = GFX_FW_TYPE_VCN0_RAM;
2591 		break;
2592 	case AMDGPU_UCODE_ID_VCN1_RAM:
2593 		*type = GFX_FW_TYPE_VCN1_RAM;
2594 		break;
2595 	case AMDGPU_UCODE_ID_DMCUB:
2596 		*type = GFX_FW_TYPE_DMUB;
2597 		break;
2598 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2599 	case AMDGPU_UCODE_ID_SDMA_RS64:
2600 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2601 		break;
2602 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2603 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2604 		break;
2605 	case AMDGPU_UCODE_ID_IMU_I:
2606 		*type = GFX_FW_TYPE_IMU_I;
2607 		break;
2608 	case AMDGPU_UCODE_ID_IMU_D:
2609 		*type = GFX_FW_TYPE_IMU_D;
2610 		break;
2611 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2612 		*type = GFX_FW_TYPE_RS64_PFP;
2613 		break;
2614 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2615 		*type = GFX_FW_TYPE_RS64_ME;
2616 		break;
2617 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2618 		*type = GFX_FW_TYPE_RS64_MEC;
2619 		break;
2620 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2621 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2622 		break;
2623 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2624 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2625 		break;
2626 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2627 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2628 		break;
2629 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2630 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2631 		break;
2632 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2633 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2634 		break;
2635 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2636 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2637 		break;
2638 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2639 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2640 		break;
2641 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2642 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2643 		break;
2644 	case AMDGPU_UCODE_ID_VPE_CTX:
2645 		*type = GFX_FW_TYPE_VPEC_FW1;
2646 		break;
2647 	case AMDGPU_UCODE_ID_VPE_CTL:
2648 		*type = GFX_FW_TYPE_VPEC_FW2;
2649 		break;
2650 	case AMDGPU_UCODE_ID_VPE:
2651 		*type = GFX_FW_TYPE_VPE;
2652 		break;
2653 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2654 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2655 		break;
2656 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2657 		*type = GFX_FW_TYPE_UMSCH_DATA;
2658 		break;
2659 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2660 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2661 		break;
2662 	case AMDGPU_UCODE_ID_P2S_TABLE:
2663 		*type = GFX_FW_TYPE_P2S_TABLE;
2664 		break;
2665 	case AMDGPU_UCODE_ID_JPEG_RAM:
2666 		*type = GFX_FW_TYPE_JPEG_RAM;
2667 		break;
2668 	case AMDGPU_UCODE_ID_ISP:
2669 		*type = GFX_FW_TYPE_ISP;
2670 		break;
2671 	case AMDGPU_UCODE_ID_MAXIMUM:
2672 	default:
2673 		return -EINVAL;
2674 	}
2675 
2676 	return 0;
2677 }
2678 
2679 static void psp_print_fw_hdr(struct psp_context *psp,
2680 			     struct amdgpu_firmware_info *ucode)
2681 {
2682 	struct amdgpu_device *adev = psp->adev;
2683 	struct common_firmware_header *hdr;
2684 
2685 	switch (ucode->ucode_id) {
2686 	case AMDGPU_UCODE_ID_SDMA0:
2687 	case AMDGPU_UCODE_ID_SDMA1:
2688 	case AMDGPU_UCODE_ID_SDMA2:
2689 	case AMDGPU_UCODE_ID_SDMA3:
2690 	case AMDGPU_UCODE_ID_SDMA4:
2691 	case AMDGPU_UCODE_ID_SDMA5:
2692 	case AMDGPU_UCODE_ID_SDMA6:
2693 	case AMDGPU_UCODE_ID_SDMA7:
2694 		hdr = (struct common_firmware_header *)
2695 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2696 		amdgpu_ucode_print_sdma_hdr(hdr);
2697 		break;
2698 	case AMDGPU_UCODE_ID_CP_CE:
2699 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2700 		amdgpu_ucode_print_gfx_hdr(hdr);
2701 		break;
2702 	case AMDGPU_UCODE_ID_CP_PFP:
2703 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2704 		amdgpu_ucode_print_gfx_hdr(hdr);
2705 		break;
2706 	case AMDGPU_UCODE_ID_CP_ME:
2707 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2708 		amdgpu_ucode_print_gfx_hdr(hdr);
2709 		break;
2710 	case AMDGPU_UCODE_ID_CP_MEC1:
2711 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2712 		amdgpu_ucode_print_gfx_hdr(hdr);
2713 		break;
2714 	case AMDGPU_UCODE_ID_RLC_G:
2715 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2716 		amdgpu_ucode_print_rlc_hdr(hdr);
2717 		break;
2718 	case AMDGPU_UCODE_ID_SMC:
2719 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2720 		amdgpu_ucode_print_smc_hdr(hdr);
2721 		break;
2722 	default:
2723 		break;
2724 	}
2725 }
2726 
2727 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2728 				       struct amdgpu_firmware_info *ucode,
2729 				       struct psp_gfx_cmd_resp *cmd)
2730 {
2731 	int ret;
2732 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2733 
2734 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2735 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2736 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2737 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2738 
2739 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2740 	if (ret)
2741 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2742 
2743 	return ret;
2744 }
2745 
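/* Build a GFX_CMD_ID_LOAD_IP_FW command for the given ucode and submit it
 * to the PSP ring via the shared command/fence buffers.
 */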
2746 int psp_execute_ip_fw_load(struct psp_context *psp,
2747 			   struct amdgpu_firmware_info *ucode)
2748 {
2749 	int ret = 0;
2750 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2751 
2752 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2753 	if (!ret) {
2754 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2755 					 psp->fence_buf_mc_addr);
2756 	}
2757 
2758 	release_psp_cmd_buf(psp);
2759 
2760 	return ret;
2761 }
2762 
2763 static int psp_load_p2s_table(struct psp_context *psp)
2764 {
2765 	int ret;
2766 	struct amdgpu_device *adev = psp->adev;
2767 	struct amdgpu_firmware_info *ucode =
2768 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2769 
2770 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2771 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2772 		return 0;
2773 
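	/* On MP0 13.0.6/13.0.14, the P2S table requires a minimum SOS firmware
	 * version (the threshold differs between APU and dGPU); skip the load
	 * below that version.
	 */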
2774 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2775 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2776 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2777 								0x0036003C;
2778 		if (psp->sos.fw_version < supp_vers)
2779 			return 0;
2780 	}
2781 
2782 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2783 		return 0;
2784 
2785 	ret = psp_execute_ip_fw_load(psp, ucode);
2786 
2787 	return ret;
2788 }
2789 
2790 static int psp_load_smu_fw(struct psp_context *psp)
2791 {
2792 	int ret;
2793 	struct amdgpu_device *adev = psp->adev;
2794 	struct amdgpu_firmware_info *ucode =
2795 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2796 	struct amdgpu_ras *ras = psp->ras_context.ras;
2797 
2798 	/*
2799 	 * Skip SMU FW reloading when BACO is used for runtime PM only,
2800 	 * as the SMU stays alive in that case.
2801 	 */
2802 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2803 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2804 		return 0;
2805 
2806 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2807 		return 0;
2808 
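	/* When SMU firmware is reloaded during a reset with RAS enabled on
	 * these ASICs, request the MP1 UNLOAD state first to prepare for the
	 * reload.
	 */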
2809 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2810 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2811 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2812 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2813 		if (ret)
2814 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2815 	}
2816 
2817 	ret = psp_execute_ip_fw_load(psp, ucode);
2818 
2819 	if (ret)
2820 		dev_err(adev->dev, "PSP load smu failed!\n");
2821 
2822 	return ret;
2823 }
2824 
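/* Decide whether PSP should skip loading a given ucode: empty images, the
 * P2S table (loaded separately), SMC under the SMU reload quirk, autoload
 * or centralized C-state management, SR-IOV-excluded firmwares, and MEC JT
 * images when autoload is enabled.
 */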
2825 static bool fw_load_skip_check(struct psp_context *psp,
2826 			       struct amdgpu_firmware_info *ucode)
2827 {
2828 	if (!ucode->fw || !ucode->ucode_size)
2829 		return true;
2830 
2831 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2832 		return true;
2833 
2834 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2835 	    (psp_smu_reload_quirk(psp) ||
2836 	     psp->autoload_supported ||
2837 	     psp->pmfw_centralized_cstate_management))
2838 		return true;
2839 
2840 	if (amdgpu_sriov_vf(psp->adev) &&
2841 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2842 		return true;
2843 
2844 	if (psp->autoload_supported &&
2845 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2846 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2847 		/* skip mec JT when autoload is enabled */
2848 		return true;
2849 
2850 	return false;
2851 }
2852 
2853 int psp_load_fw_list(struct psp_context *psp,
2854 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
2855 {
2856 	int ret = 0, i;
2857 	struct amdgpu_firmware_info *ucode;
2858 
2859 	for (i = 0; i < ucode_count; ++i) {
2860 		ucode = ucode_list[i];
2861 		psp_print_fw_hdr(psp, ucode);
2862 		ret = psp_execute_ip_fw_load(psp, ucode);
2863 		if (ret)
2864 			return ret;
2865 	}
2866 	return ret;
2867 }
2868 
2869 static int psp_load_non_psp_fw(struct psp_context *psp)
2870 {
2871 	int i, ret;
2872 	struct amdgpu_firmware_info *ucode;
2873 	struct amdgpu_device *adev = psp->adev;
2874 
2875 	if (psp->autoload_supported &&
2876 	    !psp->pmfw_centralized_cstate_management) {
2877 		ret = psp_load_smu_fw(psp);
2878 		if (ret)
2879 			return ret;
2880 	}
2881 
2882 	/* Load P2S table first if it's available */
2883 	psp_load_p2s_table(psp);
2884 
2885 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
2886 		ucode = &adev->firmware.ucode[i];
2887 
2888 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2889 		    !fw_load_skip_check(psp, ucode)) {
2890 			ret = psp_load_smu_fw(psp);
2891 			if (ret)
2892 				return ret;
2893 			continue;
2894 		}
2895 
2896 		if (fw_load_skip_check(psp, ucode))
2897 			continue;
2898 
2899 		if (psp->autoload_supported &&
2900 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2901 			     IP_VERSION(11, 0, 7) ||
2902 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2903 			     IP_VERSION(11, 0, 11) ||
2904 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2905 			     IP_VERSION(11, 0, 12)) &&
2906 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2907 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2908 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2909 			/* PSP only receives one SDMA firmware for sienna_cichlid,
2910 			 * as all four SDMA firmwares are the same
2911 			 */
2912 			continue;
2913 
2914 		psp_print_fw_hdr(psp, ucode);
2915 
2916 		ret = psp_execute_ip_fw_load(psp, ucode);
2917 		if (ret)
2918 			return ret;
2919 
2920 		/* Start RLC autoload after PSP has received all the GFX firmware */
2921 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2922 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2923 			ret = psp_rlc_autoload_start(psp);
2924 			if (ret) {
2925 				dev_err(adev->dev, "Failed to start rlc autoload\n");
2926 				return ret;
2927 			}
2928 		}
2929 	}
2930 
2931 	return 0;
2932 }
2933 
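/*
 * Full PSP bring-up path used at hw_init time: (re)initialize the KM ring,
 * run the bootloader/TMR sequence in psp_hw_start(), load the non-PSP IP
 * firmwares, then ASD and the RL image, and finally the optional security
 * TAs.
 */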
2934 static int psp_load_fw(struct amdgpu_device *adev)
2935 {
2936 	int ret;
2937 	struct psp_context *psp = &adev->psp;
2938 
2939 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2940 		/* should not destroy ring, only stop */
2941 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
2942 	} else {
2943 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2944 
2945 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2946 		if (ret) {
2947 			dev_err(adev->dev, "PSP ring init failed!\n");
2948 			goto failed;
2949 		}
2950 	}
2951 
2952 	ret = psp_hw_start(psp);
2953 	if (ret)
2954 		goto failed;
2955 
2956 	ret = psp_load_non_psp_fw(psp);
2957 	if (ret)
2958 		goto failed1;
2959 
2960 	ret = psp_asd_initialize(psp);
2961 	if (ret) {
2962 		dev_err(adev->dev, "PSP load asd failed!\n");
2963 		goto failed1;
2964 	}
2965 
2966 	ret = psp_rl_load(adev);
2967 	if (ret) {
2968 		dev_err(adev->dev, "PSP load RL failed!\n");
2969 		goto failed1;
2970 	}
2971 
2972 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2973 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
2974 			ret = psp_xgmi_initialize(psp, false, true);
2975 			/* Warn on XGMI session initialization failure
2976 			 * instead of stopping driver initialization
2977 			 */
2978 			if (ret)
2979 				dev_err(psp->adev->dev,
2980 					"XGMI: Failed to initialize XGMI session\n");
2981 		}
2982 	}
2983 
2984 	if (psp->ta_fw) {
2985 		ret = psp_ras_initialize(psp);
2986 		if (ret)
2987 			dev_err(psp->adev->dev,
2988 				"RAS: Failed to initialize RAS\n");
2989 
2990 		ret = psp_hdcp_initialize(psp);
2991 		if (ret)
2992 			dev_err(psp->adev->dev,
2993 				"HDCP: Failed to initialize HDCP\n");
2994 
2995 		ret = psp_dtm_initialize(psp);
2996 		if (ret)
2997 			dev_err(psp->adev->dev,
2998 				"DTM: Failed to initialize DTM\n");
2999 
3000 		ret = psp_rap_initialize(psp);
3001 		if (ret)
3002 			dev_err(psp->adev->dev,
3003 				"RAP: Failed to initialize RAP\n");
3004 
3005 		ret = psp_securedisplay_initialize(psp);
3006 		if (ret)
3007 			dev_err(psp->adev->dev,
3008 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3009 	}
3010 
3011 	return 0;
3012 
3013 failed1:
3014 	psp_free_shared_bufs(psp);
3015 failed:
3016 	/*
3017 	 * all cleanup jobs (xgmi terminate, ras terminate,
3018 	 * ring destroy, cmd/fence/fw buffers destroy,
3019 	 * psp->cmd destroy) are delayed to psp_hw_fini
3020 	 */
3021 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3022 	return ret;
3023 }
3024 
3025 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3026 {
3027 	int ret;
3028 	struct amdgpu_device *adev = ip_block->adev;
3029 
3030 	mutex_lock(&adev->firmware.mutex);
3031 
3032 	ret = amdgpu_ucode_init_bo(adev);
3033 	if (ret)
3034 		goto failed;
3035 
3036 	ret = psp_load_fw(adev);
3037 	if (ret) {
3038 		dev_err(adev->dev, "PSP firmware loading failed\n");
3039 		goto failed;
3040 	}
3041 
3042 	mutex_unlock(&adev->firmware.mutex);
3043 	return 0;
3044 
3045 failed:
3046 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3047 	mutex_unlock(&adev->firmware.mutex);
3048 	return -EINVAL;
3049 }
3050 
3051 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3052 {
3053 	struct amdgpu_device *adev = ip_block->adev;
3054 	struct psp_context *psp = &adev->psp;
3055 
3056 	if (psp->ta_fw) {
3057 		psp_ras_terminate(psp);
3058 		psp_securedisplay_terminate(psp);
3059 		psp_rap_terminate(psp);
3060 		psp_dtm_terminate(psp);
3061 		psp_hdcp_terminate(psp);
3062 
3063 		if (adev->gmc.xgmi.num_physical_nodes > 1)
3064 			psp_xgmi_terminate(psp);
3065 	}
3066 
3067 	psp_asd_terminate(psp);
3068 	psp_tmr_terminate(psp);
3069 
3070 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3071 
3072 	return 0;
3073 }
3074 
3075 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3076 {
3077 	int ret = 0;
3078 	struct amdgpu_device *adev = ip_block->adev;
3079 	struct psp_context *psp = &adev->psp;
3080 
3081 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3082 	    psp->xgmi_context.context.initialized) {
3083 		ret = psp_xgmi_terminate(psp);
3084 		if (ret) {
3085 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3086 			goto out;
3087 		}
3088 	}
3089 
3090 	if (psp->ta_fw) {
3091 		ret = psp_ras_terminate(psp);
3092 		if (ret) {
3093 			dev_err(adev->dev, "Failed to terminate ras ta\n");
3094 			goto out;
3095 		}
3096 		ret = psp_hdcp_terminate(psp);
3097 		if (ret) {
3098 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3099 			goto out;
3100 		}
3101 		ret = psp_dtm_terminate(psp);
3102 		if (ret) {
3103 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3104 			goto out;
3105 		}
3106 		ret = psp_rap_terminate(psp);
3107 		if (ret) {
3108 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3109 			goto out;
3110 		}
3111 		ret = psp_securedisplay_terminate(psp);
3112 		if (ret) {
3113 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3114 			goto out;
3115 		}
3116 	}
3117 
3118 	ret = psp_asd_terminate(psp);
3119 	if (ret) {
3120 		dev_err(adev->dev, "Failed to terminate asd\n");
3121 		goto out;
3122 	}
3123 
3124 	ret = psp_tmr_terminate(psp);
3125 	if (ret) {
3126 		dev_err(adev->dev, "Failed to terminate tmr\n");
3127 		goto out;
3128 	}
3129 
3130 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3131 	if (ret)
3132 		dev_err(adev->dev, "PSP ring stop failed\n");
3133 
3134 out:
3135 	return ret;
3136 }
3137 
3138 static int psp_resume(struct amdgpu_ip_block *ip_block)
3139 {
3140 	int ret;
3141 	struct amdgpu_device *adev = ip_block->adev;
3142 	struct psp_context *psp = &adev->psp;
3143 
3144 	dev_info(adev->dev, "PSP is resuming...\n");
3145 
3146 	if (psp->mem_train_ctx.enable_mem_training) {
3147 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3148 		if (ret) {
3149 			dev_err(adev->dev, "Failed to process memory training!\n");
3150 			return ret;
3151 		}
3152 	}
3153 
3154 	mutex_lock(&adev->firmware.mutex);
3155 
3156 	ret = amdgpu_ucode_init_bo(adev);
3157 	if (ret)
3158 		goto failed;
3159 
3160 	ret = psp_hw_start(psp);
3161 	if (ret)
3162 		goto failed;
3163 
3164 	ret = psp_load_non_psp_fw(psp);
3165 	if (ret)
3166 		goto failed;
3167 
3168 	ret = psp_asd_initialize(psp);
3169 	if (ret) {
3170 		dev_err(adev->dev, "PSP load asd failed!\n");
3171 		goto failed;
3172 	}
3173 
3174 	ret = psp_rl_load(adev);
3175 	if (ret) {
3176 		dev_err(adev->dev, "PSP load RL failed!\n");
3177 		goto failed;
3178 	}
3179 
3180 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3181 		ret = psp_xgmi_initialize(psp, false, true);
3182 		/* Warn on XGMI session initialization failure
3183 		 * instead of stopping driver initialization
3184 		 */
3185 		if (ret)
3186 			dev_err(psp->adev->dev,
3187 				"XGMI: Failed to initialize XGMI session\n");
3188 	}
3189 
3190 	if (psp->ta_fw) {
3191 		ret = psp_ras_initialize(psp);
3192 		if (ret)
3193 			dev_err(psp->adev->dev,
3194 				"RAS: Failed to initialize RAS\n");
3195 
3196 		ret = psp_hdcp_initialize(psp);
3197 		if (ret)
3198 			dev_err(psp->adev->dev,
3199 				"HDCP: Failed to initialize HDCP\n");
3200 
3201 		ret = psp_dtm_initialize(psp);
3202 		if (ret)
3203 			dev_err(psp->adev->dev,
3204 				"DTM: Failed to initialize DTM\n");
3205 
3206 		ret = psp_rap_initialize(psp);
3207 		if (ret)
3208 			dev_err(psp->adev->dev,
3209 				"RAP: Failed to initialize RAP\n");
3210 
3211 		ret = psp_securedisplay_initialize(psp);
3212 		if (ret)
3213 			dev_err(psp->adev->dev,
3214 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3215 	}
3216 
3217 	mutex_unlock(&adev->firmware.mutex);
3218 
3219 	return 0;
3220 
3221 failed:
3222 	dev_err(adev->dev, "PSP resume failed\n");
3223 	mutex_unlock(&adev->firmware.mutex);
3224 	return ret;
3225 }
3226 
3227 int psp_gpu_reset(struct amdgpu_device *adev)
3228 {
3229 	int ret;
3230 
3231 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3232 		return 0;
3233 
3234 	mutex_lock(&adev->psp.mutex);
3235 	ret = psp_mode1_reset(&adev->psp);
3236 	mutex_unlock(&adev->psp.mutex);
3237 
3238 	return ret;
3239 }
3240 
3241 int psp_rlc_autoload_start(struct psp_context *psp)
3242 {
3243 	int ret;
3244 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3245 
3246 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3247 
3248 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3249 				 psp->fence_buf_mc_addr);
3250 
3251 	release_psp_cmd_buf(psp);
3252 
3253 	return ret;
3254 }
3255 
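/*
 * Write one command frame into the KM (GPCOM) ring: locate the next
 * rb_frame from the current write pointer, fill in the command/fence MC
 * addresses and fence value, flush HDP, and advance the write pointer in
 * DWORDs modulo the ring size.
 */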
3256 int psp_ring_cmd_submit(struct psp_context *psp,
3257 			uint64_t cmd_buf_mc_addr,
3258 			uint64_t fence_mc_addr,
3259 			int index)
3260 {
3261 	unsigned int psp_write_ptr_reg = 0;
3262 	struct psp_gfx_rb_frame *write_frame;
3263 	struct psp_ring *ring = &psp->km_ring;
3264 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3265 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3266 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3267 	struct amdgpu_device *adev = psp->adev;
3268 	uint32_t ring_size_dw = ring->ring_size / 4;
3269 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3270 
3271 	/* KM (GPCOM) prepare write pointer */
3272 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3273 
3274 	/* Update KM RB frame pointer to new frame */
3275 	/* write_frame ptr increments by size of rb_frame in bytes */
3276 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3277 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3278 		write_frame = ring_buffer_start;
3279 	else
3280 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3281 	/* Check invalid write_frame ptr address */
3282 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3283 		dev_err(adev->dev,
3284 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3285 			ring_buffer_start, ring_buffer_end, write_frame);
3286 		dev_err(adev->dev,
3287 			"write_frame is pointing to address out of bounds\n");
3288 		return -EINVAL;
3289 	}
3290 
3291 	/* Initialize KM RB frame */
3292 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3293 
3294 	/* Update KM RB frame */
3295 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3296 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3297 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3298 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3299 	write_frame->fence_value = index;
3300 	amdgpu_device_flush_hdp(adev, NULL);
3301 
3302 	/* Update the write Pointer in DWORDs */
3303 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3304 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3305 	return 0;
3306 }
3307 
3308 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3309 {
3310 	struct amdgpu_device *adev = psp->adev;
3311 	const struct psp_firmware_header_v1_0 *asd_hdr;
3312 	int err = 0;
3313 
3314 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3315 				   "amdgpu/%s_asd.bin", chip_name);
3316 	if (err)
3317 		goto out;
3318 
3319 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3320 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3321 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3322 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3323 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3324 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3325 	return 0;
3326 out:
3327 	amdgpu_ucode_release(&adev->psp.asd_fw);
3328 	return err;
3329 }
3330 
3331 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3332 {
3333 	struct amdgpu_device *adev = psp->adev;
3334 	const struct psp_firmware_header_v1_0 *toc_hdr;
3335 	int err = 0;
3336 
3337 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3338 				   "amdgpu/%s_toc.bin", chip_name);
3339 	if (err)
3340 		goto out;
3341 
3342 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3343 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3344 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3345 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3346 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3347 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3348 	return 0;
3349 out:
3350 	amdgpu_ucode_release(&adev->psp.toc_fw);
3351 	return err;
3352 }
3353 
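/* Copy the version, size and start address of one packed PSP firmware
 * component from a v2 SOS header descriptor into the matching psp_context
 * bin descriptor.
 */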
3354 static int parse_sos_bin_descriptor(struct psp_context *psp,
3355 				   const struct psp_fw_bin_desc *desc,
3356 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3357 {
3358 	uint8_t *ucode_start_addr  = NULL;
3359 
3360 	if (!psp || !desc || !sos_hdr)
3361 		return -EINVAL;
3362 
3363 	ucode_start_addr  = (uint8_t *)sos_hdr +
3364 			    le32_to_cpu(desc->offset_bytes) +
3365 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3366 
3367 	switch (desc->fw_type) {
3368 	case PSP_FW_TYPE_PSP_SOS:
3369 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3370 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3371 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3372 		psp->sos.start_addr	   = ucode_start_addr;
3373 		break;
3374 	case PSP_FW_TYPE_PSP_SYS_DRV:
3375 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3376 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3377 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3378 		psp->sys.start_addr        = ucode_start_addr;
3379 		break;
3380 	case PSP_FW_TYPE_PSP_KDB:
3381 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3382 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3383 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3384 		psp->kdb.start_addr        = ucode_start_addr;
3385 		break;
3386 	case PSP_FW_TYPE_PSP_TOC:
3387 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3388 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3389 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3390 		psp->toc.start_addr        = ucode_start_addr;
3391 		break;
3392 	case PSP_FW_TYPE_PSP_SPL:
3393 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3394 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3395 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3396 		psp->spl.start_addr        = ucode_start_addr;
3397 		break;
3398 	case PSP_FW_TYPE_PSP_RL:
3399 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3400 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3401 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3402 		psp->rl.start_addr         = ucode_start_addr;
3403 		break;
3404 	case PSP_FW_TYPE_PSP_SOC_DRV:
3405 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3406 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3407 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3408 		psp->soc_drv.start_addr         = ucode_start_addr;
3409 		break;
3410 	case PSP_FW_TYPE_PSP_INTF_DRV:
3411 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3412 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3413 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3414 		psp->intf_drv.start_addr        = ucode_start_addr;
3415 		break;
3416 	case PSP_FW_TYPE_PSP_DBG_DRV:
3417 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3418 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3419 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3420 		psp->dbg_drv.start_addr         = ucode_start_addr;
3421 		break;
3422 	case PSP_FW_TYPE_PSP_RAS_DRV:
3423 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3424 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3425 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3426 		psp->ras_drv.start_addr         = ucode_start_addr;
3427 		break;
3428 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3429 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3430 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3431 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3432 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3433 		break;
3434 	case PSP_FW_TYPE_PSP_SPDM_DRV:
3435 		psp->spdm_drv.fw_version	= le32_to_cpu(desc->fw_version);
3436 		psp->spdm_drv.feature_version	= le32_to_cpu(desc->fw_version);
3437 		psp->spdm_drv.size_bytes	= le32_to_cpu(desc->size_bytes);
3438 		psp->spdm_drv.start_addr	= ucode_start_addr;
3439 		break;
3440 	default:
3441 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3442 		break;
3443 	}
3444 
3445 	return 0;
3446 }
3447 
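/* Fill in the base SYS_DRV and SOS descriptors from a v1.x SOS header,
 * using the alternate (aux) images on MP0 13.0.2 parts that are not
 * connected to the CPU.
 */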
3448 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3449 {
3450 	const struct psp_firmware_header_v1_0 *sos_hdr;
3451 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3452 	uint8_t *ucode_array_start_addr;
3453 
3454 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3455 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3456 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3457 
3458 	if (adev->gmc.xgmi.connected_to_cpu ||
3459 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3460 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3461 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3462 
3463 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3464 		adev->psp.sys.start_addr = ucode_array_start_addr;
3465 
3466 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3467 		adev->psp.sos.start_addr = ucode_array_start_addr +
3468 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3469 	} else {
3470 		/* Load alternate PSP SOS FW */
3471 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3472 
3473 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3474 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3475 
3476 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3477 		adev->psp.sys.start_addr = ucode_array_start_addr +
3478 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3479 
3480 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3481 		adev->psp.sos.start_addr = ucode_array_start_addr +
3482 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3483 	}
3484 
3485 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3486 		dev_warn(adev->dev, "PSP SOS FW not available\n");
3487 		return -EINVAL;
3488 	}
3489 
3490 	return 0;
3491 }
3492 
3493 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3494 {
3495 	struct amdgpu_device *adev = psp->adev;
3496 	const struct psp_firmware_header_v1_0 *sos_hdr;
3497 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3498 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3499 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3500 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3501 	const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3502 	int fw_index, fw_bin_count, start_index = 0;
3503 	const struct psp_fw_bin_desc *fw_bin;
3504 	uint8_t *ucode_array_start_addr;
3505 	int err = 0;
3506 
3507 	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3508 				   "amdgpu/%s_sos.bin", chip_name);
3509 	if (err)
3510 		goto out;
3511 
3512 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3513 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3514 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3515 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3516 
3517 	switch (sos_hdr->header.header_version_major) {
3518 	case 1:
3519 		err = psp_init_sos_base_fw(adev);
3520 		if (err)
3521 			goto out;
3522 
3523 		if (sos_hdr->header.header_version_minor == 1) {
3524 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3525 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3526 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3527 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3528 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3529 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3530 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3531 		}
3532 		if (sos_hdr->header.header_version_minor == 2) {
3533 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3534 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3535 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3536 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3537 		}
3538 		if (sos_hdr->header.header_version_minor == 3) {
3539 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3540 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3541 			adev->psp.toc.start_addr = ucode_array_start_addr +
3542 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3543 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3544 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3545 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3546 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3547 			adev->psp.spl.start_addr = ucode_array_start_addr +
3548 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3549 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3550 			adev->psp.rl.start_addr = ucode_array_start_addr +
3551 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3552 		}
3553 		break;
3554 	case 2:
3555 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3556 
3557 		fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3558 
3559 		if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3560 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3561 			err = -EINVAL;
3562 			goto out;
3563 		}
3564 
3565 		if (sos_hdr_v2_0->header.header_version_minor == 1) {
3566 			sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3567 
3568 			fw_bin = sos_hdr_v2_1->psp_fw_bin;
3569 
3570 			if (psp_is_aux_sos_load_required(psp))
3571 				start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3572 			else
3573 				fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3574 
3575 		} else {
3576 			fw_bin = sos_hdr_v2_0->psp_fw_bin;
3577 		}
3578 
3579 		for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3580 			err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3581 						       sos_hdr_v2_0);
3582 			if (err)
3583 				goto out;
3584 		}
3585 		break;
3586 	default:
3587 		dev_err(adev->dev,
3588 			"unsupported PSP SOS firmware header version\n");
3589 		err = -EINVAL;
3590 		goto out;
3591 	}
3592 
3593 	return 0;
3594 out:
3595 	amdgpu_ucode_release(&adev->psp.sos_fw);
3596 
3597 	return err;
3598 }
3599 
3600 static bool is_ta_fw_applicable(struct psp_context *psp,
3601 				const struct psp_fw_bin_desc *desc)
3602 {
3603 	struct amdgpu_device *adev = psp->adev;
3604 	uint32_t fw_version;
3605 
3606 	switch (desc->fw_type) {
3607 	case TA_FW_TYPE_PSP_XGMI:
3608 	case TA_FW_TYPE_PSP_XGMI_AUX:
3609 		/* For now, the AUX TA only exists in the 13.0.6 TA binary,
3610 		 * starting from v20.00.0x.14.
3611 		 */
3612 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3613 		    IP_VERSION(13, 0, 6)) {
3614 			fw_version = le32_to_cpu(desc->fw_version);
3615 
3616 			if (adev->flags & AMD_IS_APU &&
3617 			    (fw_version & 0xff) >= 0x14)
3618 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3619 			else
3620 				return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3621 		}
3622 		break;
3623 	default:
3624 		break;
3625 	}
3626 
3627 	return true;
3628 }
3629 
3630 static int parse_ta_bin_descriptor(struct psp_context *psp,
3631 				   const struct psp_fw_bin_desc *desc,
3632 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3633 {
3634 	uint8_t *ucode_start_addr  = NULL;
3635 
3636 	if (!psp || !desc || !ta_hdr)
3637 		return -EINVAL;
3638 
3639 	if (!is_ta_fw_applicable(psp, desc))
3640 		return 0;
3641 
3642 	ucode_start_addr  = (uint8_t *)ta_hdr +
3643 			    le32_to_cpu(desc->offset_bytes) +
3644 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3645 
3646 	switch (desc->fw_type) {
3647 	case TA_FW_TYPE_PSP_ASD:
3648 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3649 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3650 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3651 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3652 		break;
3653 	case TA_FW_TYPE_PSP_XGMI:
3654 	case TA_FW_TYPE_PSP_XGMI_AUX:
3655 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3656 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3657 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3658 		break;
3659 	case TA_FW_TYPE_PSP_RAS:
3660 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3661 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3662 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3663 		break;
3664 	case TA_FW_TYPE_PSP_HDCP:
3665 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3666 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3667 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3668 		break;
3669 	case TA_FW_TYPE_PSP_DTM:
3670 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3671 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3672 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3673 		break;
3674 	case TA_FW_TYPE_PSP_RAP:
3675 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3676 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3677 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3678 		break;
3679 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3680 		psp->securedisplay_context.context.bin_desc.fw_version =
3681 			le32_to_cpu(desc->fw_version);
3682 		psp->securedisplay_context.context.bin_desc.size_bytes =
3683 			le32_to_cpu(desc->size_bytes);
3684 		psp->securedisplay_context.context.bin_desc.start_addr =
3685 			ucode_start_addr;
3686 		break;
3687 	default:
3688 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3689 		break;
3690 	}
3691 
3692 	return 0;
3693 }
3694 
3695 static int parse_ta_v1_microcode(struct psp_context *psp)
3696 {
3697 	const struct ta_firmware_header_v1_0 *ta_hdr;
3698 	struct amdgpu_device *adev = psp->adev;
3699 
3700 	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
3701 
3702 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3703 		return -EINVAL;
3704 
3705 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3706 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3707 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3708 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3709 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3710 		(uint8_t *)ta_hdr +
3711 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3712 
3713 	adev->psp.ras_context.context.bin_desc.fw_version =
3714 		le32_to_cpu(ta_hdr->ras.fw_version);
3715 	adev->psp.ras_context.context.bin_desc.size_bytes =
3716 		le32_to_cpu(ta_hdr->ras.size_bytes);
3717 	adev->psp.ras_context.context.bin_desc.start_addr =
3718 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3719 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3720 
3721 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3722 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3723 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3724 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3725 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3726 		(uint8_t *)ta_hdr +
3727 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3728 
3729 	adev->psp.dtm_context.context.bin_desc.fw_version =
3730 		le32_to_cpu(ta_hdr->dtm.fw_version);
3731 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3732 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3733 	adev->psp.dtm_context.context.bin_desc.start_addr =
3734 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3735 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3736 
3737 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3738 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3739 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3740 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3741 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3742 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3743 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3744 
3745 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3746 
3747 	return 0;
3748 }
3749 
3750 static int parse_ta_v2_microcode(struct psp_context *psp)
3751 {
3752 	const struct ta_firmware_header_v2_0 *ta_hdr;
3753 	struct amdgpu_device *adev = psp->adev;
3754 	int err = 0;
3755 	int ta_index = 0;
3756 
3757 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3758 
3759 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3760 		return -EINVAL;
3761 
3762 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3763 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3764 		return -EINVAL;
3765 	}
3766 
3767 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3768 		err = parse_ta_bin_descriptor(psp,
3769 					      &ta_hdr->ta_fw_bin[ta_index],
3770 					      ta_hdr);
3771 		if (err)
3772 			return err;
3773 	}
3774 
3775 	return 0;
3776 }
3777 
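/**
 * psp_init_ta_microcode - request and parse the PSP TA firmware image
 * @psp: pointer to the PSP context
 * @chip_name: chip name used to build the amdgpu/<chip_name>_ta.bin path
 *
 * Parses v1 and v2 TA headers into the per-TA bin descriptors. The firmware
 * reference is released on failure.
 *
 * Return: 0 on success or a negative error code on failure.
 */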
3778 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3779 {
3780 	const struct common_firmware_header *hdr;
3781 	struct amdgpu_device *adev = psp->adev;
3782 	int err;
3783 
3784 	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3785 				   "amdgpu/%s_ta.bin", chip_name);
3786 	if (err)
3787 		return err;
3788 
3789 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3790 	switch (le16_to_cpu(hdr->header_version_major)) {
3791 	case 1:
3792 		err = parse_ta_v1_microcode(psp);
3793 		break;
3794 	case 2:
3795 		err = parse_ta_v2_microcode(psp);
3796 		break;
3797 	default:
3798 		dev_err(adev->dev, "unsupported TA header version\n");
3799 		err = -EINVAL;
3800 	}
3801 
3802 	if (err)
3803 		amdgpu_ucode_release(&adev->psp.ta_fw);
3804 
3805 	return err;
3806 }
3807 
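/**
 * psp_init_cap_microcode - request the optional SR-IOV CAP firmware image
 * @psp: pointer to the PSP context
 * @chip_name: chip name used to build the amdgpu/<chip_name>_cap.bin path
 *
 * Only valid under SR-IOV; a missing CAP image is not treated as an error.
 *
 * Return: 0 on success (including when no CAP image exists) or a negative
 * error code on failure.
 */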
3808 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3809 {
3810 	struct amdgpu_device *adev = psp->adev;
3811 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3812 	struct amdgpu_firmware_info *info = NULL;
3813 	int err = 0;
3814 
3815 	if (!amdgpu_sriov_vf(adev)) {
3816 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3817 		return -EINVAL;
3818 	}
3819 
3820 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
3821 				   "amdgpu/%s_cap.bin", chip_name);
3822 	if (err) {
3823 		if (err == -ENODEV) {
3824 			dev_warn(adev->dev, "cap microcode does not exist, skipping\n");
3825 			err = 0;
3826 		} else {
3827 			dev_err(adev->dev, "failed to initialize cap microcode\n");
3828 		}
3829 		goto out;
3830 	}
3831 
3832 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3833 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
3834 	info->fw = adev->psp.cap_fw;
3835 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3836 		adev->psp.cap_fw->data;
3837 	adev->firmware.fw_size += ALIGN(
3838 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3839 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3840 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3841 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3842 
3843 	return 0;
3844 
3845 out:
3846 	amdgpu_ucode_release(&adev->psp.cap_fw);
3847 	return err;
3848 }
3849 
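/**
 * psp_config_sq_perfmon - ask the PSP to configure SQ perfmon overrides
 * @psp: pointer to the PSP context
 * @xcp_id: XCP (partition) whose SQ configuration is targeted
 * @core_override_enable: enable the core override
 * @reg_override_enable: enable the register override
 * @perfmon_override_enable: enable the perfmon override
 *
 * Submits a GFX_CMD_ID_CONFIG_SQ_PERFMON command to the PSP. Only supported
 * on MP0 13.0.6; silently skipped under SR-IOV.
 *
 * Return: 0 on success or a negative error code on failure.
 */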
3850 int psp_config_sq_perfmon(struct psp_context *psp,
3851 		uint32_t xcp_id, bool core_override_enable,
3852 		bool reg_override_enable, bool perfmon_override_enable)
3853 {
3854 	int ret;
3855 
3856 	if (amdgpu_sriov_vf(psp->adev))
3857 		return 0;
3858 
3859 	if (xcp_id > MAX_XCP) {
3860 		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
3861 		return -EINVAL;
3862 	}
3863 
3864 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
3865 		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
3866 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
3867 		return -EINVAL;
3868 	}
3869 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3870 
3871 	cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
3872 	cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
3873 	cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
3874 	cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
3875 	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
3876 
3877 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
3878 	if (ret)
3879 		dev_warn(psp->adev->dev, "PSP failed to configure SQ perfmon: xcp%d core%d reg%d perfmon%d\n",
3880 			 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
3881 
3882 	release_psp_cmd_buf(psp);
3883 	return ret;
3884 }
3885 
3886 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3887 					enum amd_clockgating_state state)
3888 {
3889 	return 0;
3890 }
3891 
3892 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
3893 				     enum amd_powergating_state state)
3894 {
3895 	return 0;
3896 }
3897 
3898 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3899 					 struct device_attribute *attr,
3900 					 char *buf)
3901 {
3902 	struct drm_device *ddev = dev_get_drvdata(dev);
3903 	struct amdgpu_device *adev = drm_to_adev(ddev);
3904 	struct amdgpu_ip_block *ip_block;
3905 	uint32_t fw_ver;
3906 	int ret;
3907 
3908 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
3909 	if (!ip_block || !ip_block->status.late_initialized) {
3910 		dev_info(adev->dev, "PSP block is not ready yet.\n");
3911 		return -EBUSY;
3912 	}
3913 
3914 	mutex_lock(&adev->psp.mutex);
3915 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3916 	mutex_unlock(&adev->psp.mutex);
3917 
3918 	if (ret) {
3919 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3920 		return ret;
3921 	}
3922 
3923 	return sysfs_emit(buf, "%x\n", fw_ver);
3924 }
3925 
3926 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3927 					  struct device_attribute *attr,
3928 					  const char *buf,
3929 					  size_t count)
3930 {
3931 	struct drm_device *ddev = dev_get_drvdata(dev);
3932 	struct amdgpu_device *adev = drm_to_adev(ddev);
3933 	int ret, idx;
3934 	const struct firmware *usbc_pd_fw;
3935 	struct amdgpu_bo *fw_buf_bo = NULL;
3936 	uint64_t fw_pri_mc_addr;
3937 	void *fw_pri_cpu_addr;
3938 	struct amdgpu_ip_block *ip_block;
3939 
3940 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
3941 	if (!ip_block || !ip_block->status.late_initialized) {
3942 		dev_err(adev->dev, "PSP block is not ready yet.\n");
3943 		return -EBUSY;
3944 	}
3945 
3946 	if (!drm_dev_enter(ddev, &idx))
3947 		return -ENODEV;
3948 
3949 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
3950 				   "amdgpu/%s", buf);
3951 	if (ret)
3952 		goto fail;
3953 
3954 	/* LFB address which is aligned to 1MB boundary per PSP request */
3955 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3956 				      AMDGPU_GEM_DOMAIN_VRAM |
3957 				      AMDGPU_GEM_DOMAIN_GTT,
3958 				      &fw_buf_bo, &fw_pri_mc_addr,
3959 				      &fw_pri_cpu_addr);
3960 	if (ret)
3961 		goto rel_buf;
3962 
3963 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3964 
3965 	mutex_lock(&adev->psp.mutex);
3966 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3967 	mutex_unlock(&adev->psp.mutex);
3968 
3969 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3970 
3971 rel_buf:
3972 	amdgpu_ucode_release(&usbc_pd_fw);
3973 fail:
3974 	if (ret) {
3975 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
3976 		count = ret;
3977 	}
3978 
3979 	drm_dev_exit(idx);
3980 	return count;
3981 }
3982 
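/**
 * psp_copy_fw - copy a firmware binary into the PSP private firmware buffer
 * @psp: pointer to the PSP context
 * @start_addr: start of the firmware binary to copy
 * @bin_size: size of the firmware binary in bytes
 *
 * The copy is skipped when the DRM device is no longer accessible
 * (drm_dev_enter() fails), e.g. after unplug.
 */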
3983 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3984 {
3985 	int idx;
3986 
3987 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3988 		return;
3989 
3990 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3991 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
3992 
3993 	drm_dev_exit(idx);
3994 }
3995 
3996 /**
3997  * DOC: usbc_pd_fw
3998  * Reading from this file retrieves the USB-C PD firmware version. Writing the
3999  * name of a firmware file under the amdgpu/ firmware directory triggers the update.
4000  */
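/*
 * Minimal userspace sketch of the interface above (illustrative only: the
 * sysfs path depends on the card index, "usbc_pd.bin" is a placeholder name
 * for a file placed under the amdgpu/ firmware directory, error handling is
 * omitted, and <fcntl.h>/<unistd.h> are needed):
 *
 *	int fd = open("/sys/class/drm/card0/device/usbc_pd_fw", O_RDONLY);
 *	char ver[16] = {};
 *
 *	read(fd, ver, sizeof(ver) - 1);	// current PD FW version, in hex
 *	close(fd);
 *
 *	fd = open("/sys/class/drm/card0/device/usbc_pd_fw", O_WRONLY);
 *	write(fd, "usbc_pd.bin", 11);	// trigger the update
 *	close(fd);
 */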
4001 static DEVICE_ATTR(usbc_pd_fw, 0644,
4002 		   psp_usbc_pd_fw_sysfs_read,
4003 		   psp_usbc_pd_fw_sysfs_write);
4004 
4005 int is_psp_fw_valid(struct psp_bin_desc bin)
4006 {
4007 	return bin.size_bytes;
4008 }
4009 
4010 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4011 					struct bin_attribute *bin_attr,
4012 					char *buffer, loff_t pos, size_t count)
4013 {
4014 	struct device *dev = kobj_to_dev(kobj);
4015 	struct drm_device *ddev = dev_get_drvdata(dev);
4016 	struct amdgpu_device *adev = drm_to_adev(ddev);
4017 
4018 	adev->psp.vbflash_done = false;
4019 
4020 	/* Safeguard against memory drain */
4021 	if (pos + count > AMD_VBIOS_FILE_MAX_SIZE_B) {
4022 		dev_err(adev->dev, "File size cannot exceed %u bytes\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4023 		kvfree(adev->psp.vbflash_tmp_buf);
4024 		adev->psp.vbflash_tmp_buf = NULL;
4025 		adev->psp.vbflash_image_size = 0;
4026 		return -ENOMEM;
4027 	}
4028 
4029 	/* TODO: just allocate the maximum for now; optimize with realloc later if needed */
4030 	if (!adev->psp.vbflash_tmp_buf) {
4031 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4032 		if (!adev->psp.vbflash_tmp_buf)
4033 			return -ENOMEM;
4034 	}
4035 
4036 	mutex_lock(&adev->psp.mutex);
4037 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4038 	adev->psp.vbflash_image_size += count;
4039 	mutex_unlock(&adev->psp.mutex);
4040 
4041 	dev_dbg(adev->dev, "IFWI staged for update\n");
4042 
4043 	return count;
4044 }
4045 
4046 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4047 				       struct bin_attribute *bin_attr, char *buffer,
4048 				       loff_t pos, size_t count)
4049 {
4050 	struct device *dev = kobj_to_dev(kobj);
4051 	struct drm_device *ddev = dev_get_drvdata(dev);
4052 	struct amdgpu_device *adev = drm_to_adev(ddev);
4053 	struct amdgpu_bo *fw_buf_bo = NULL;
4054 	uint64_t fw_pri_mc_addr;
4055 	void *fw_pri_cpu_addr;
4056 	int ret;
4057 
4058 	if (adev->psp.vbflash_image_size == 0)
4059 		return -EINVAL;
4060 
4061 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4062 
4063 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4064 					AMDGPU_GPU_PAGE_SIZE,
4065 					AMDGPU_GEM_DOMAIN_VRAM,
4066 					&fw_buf_bo,
4067 					&fw_pri_mc_addr,
4068 					&fw_pri_cpu_addr);
4069 	if (ret)
4070 		goto rel_buf;
4071 
4072 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4073 
4074 	mutex_lock(&adev->psp.mutex);
4075 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4076 	mutex_unlock(&adev->psp.mutex);
4077 
4078 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4079 
4080 rel_buf:
4081 	kvfree(adev->psp.vbflash_tmp_buf);
4082 	adev->psp.vbflash_tmp_buf = NULL;
4083 	adev->psp.vbflash_image_size = 0;
4084 
4085 	if (ret) {
4086 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4087 		return ret;
4088 	}
4089 
4090 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4091 	return 0;
4092 }
4093 
4094 /**
4095  * DOC: psp_vbflash
4096  * Writing to this file will stage an IFWI for update. Reading from this file
4097  * will trigger the update process.
4098  */
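/*
 * Minimal userspace sketch of the staged flash flow (illustrative only: the
 * sysfs path depends on the card index, "ifwi.bin" is a placeholder image,
 * error handling is omitted, and <fcntl.h>/<unistd.h> are needed):
 *
 *	char buf[4096];
 *	ssize_t n;
 *	int img = open("ifwi.bin", O_RDONLY);
 *	int fd = open("/sys/class/drm/card0/device/psp_vbflash", O_WRONLY);
 *
 *	while ((n = read(img, buf, sizeof(buf))) > 0)	// stage the IFWI
 *		write(fd, buf, n);
 *	close(fd);
 *	close(img);
 *
 *	fd = open("/sys/class/drm/card0/device/psp_vbflash", O_RDONLY);
 *	n = read(fd, buf, sizeof(buf));			// trigger the flash
 *	close(fd);
 */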
4099 static struct bin_attribute psp_vbflash_bin_attr = {
4100 	.attr = {.name = "psp_vbflash", .mode = 0660},
4101 	.size = 0,
4102 	.write = amdgpu_psp_vbflash_write,
4103 	.read = amdgpu_psp_vbflash_read,
4104 };
4105 
4106 /**
4107  * DOC: psp_vbflash_status
4108  * The status of the flash process:
4109  * 0: IFWI flash not complete. 1: IFWI flash complete.
4110  * Any other value: the flash failed; the raw PSP status (bit 31 set) is reported.
4111  */
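/*
 * Polling sketch for the attribute above (illustrative path): after the
 * flash has been triggered, userspace can read until the value is "0x1"
 * (complete) or a raw PSP status with bit 31 set (failure).
 *
 *	int fd = open("/sys/class/drm/card0/device/psp_vbflash_status", O_RDONLY);
 *	char status[16] = {};
 *
 *	read(fd, status, sizeof(status) - 1);	// "0x0", "0x1" or raw status
 *	close(fd);
 */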
4112 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4113 					 struct device_attribute *attr,
4114 					 char *buf)
4115 {
4116 	struct drm_device *ddev = dev_get_drvdata(dev);
4117 	struct amdgpu_device *adev = drm_to_adev(ddev);
4118 	uint32_t vbflash_status;
4119 
4120 	vbflash_status = psp_vbflash_status(&adev->psp);
4121 	if (!adev->psp.vbflash_done)
4122 		vbflash_status = 0;
4123 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4124 		vbflash_status = 1;
4125 
4126 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
4127 }
4128 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4129 
4130 static struct bin_attribute *bin_flash_attrs[] = {
4131 	&psp_vbflash_bin_attr,
4132 	NULL
4133 };
4134 
4135 static struct attribute *flash_attrs[] = {
4136 	&dev_attr_psp_vbflash_status.attr,
4137 	&dev_attr_usbc_pd_fw.attr,
4138 	NULL
4139 };
4140 
4141 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4142 {
4143 	struct device *dev = kobj_to_dev(kobj);
4144 	struct drm_device *ddev = dev_get_drvdata(dev);
4145 	struct amdgpu_device *adev = drm_to_adev(ddev);
4146 
4147 	if (attr == &dev_attr_usbc_pd_fw.attr)
4148 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
4149 
4150 	return adev->psp.sup_ifwi_up ? 0440 : 0;
4151 }
4152 
4153 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4154 						const struct bin_attribute *attr,
4155 						int idx)
4156 {
4157 	struct device *dev = kobj_to_dev(kobj);
4158 	struct drm_device *ddev = dev_get_drvdata(dev);
4159 	struct amdgpu_device *adev = drm_to_adev(ddev);
4160 
4161 	return adev->psp.sup_ifwi_up ? 0660 : 0;
4162 }
4163 
4164 const struct attribute_group amdgpu_flash_attr_group = {
4165 	.attrs = flash_attrs,
4166 	.bin_attrs = bin_flash_attrs,
4167 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4168 	.is_visible = amdgpu_flash_attr_is_visible,
4169 };
4170 
4171 const struct amd_ip_funcs psp_ip_funcs = {
4172 	.name = "psp",
4173 	.early_init = psp_early_init,
4174 	.sw_init = psp_sw_init,
4175 	.sw_fini = psp_sw_fini,
4176 	.hw_init = psp_hw_init,
4177 	.hw_fini = psp_hw_fini,
4178 	.suspend = psp_suspend,
4179 	.resume = psp_resume,
4180 	.set_clockgating_state = psp_set_clockgating_state,
4181 	.set_powergating_state = psp_set_powergating_state,
4182 };
4183 
4184 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4185 	.type = AMD_IP_BLOCK_TYPE_PSP,
4186 	.major = 3,
4187 	.minor = 1,
4188 	.rev = 0,
4189 	.funcs = &psp_ip_funcs,
4190 };
4191 
4192 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4193 	.type = AMD_IP_BLOCK_TYPE_PSP,
4194 	.major = 10,
4195 	.minor = 0,
4196 	.rev = 0,
4197 	.funcs = &psp_ip_funcs,
4198 };
4199 
4200 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4201 	.type = AMD_IP_BLOCK_TYPE_PSP,
4202 	.major = 11,
4203 	.minor = 0,
4204 	.rev = 0,
4205 	.funcs = &psp_ip_funcs,
4206 };
4207 
4208 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4209 	.type = AMD_IP_BLOCK_TYPE_PSP,
4210 	.major = 11,
4211 	.minor = 0,
4212 	.rev = 8,
4213 	.funcs = &psp_ip_funcs,
4214 };
4215 
4216 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4217 	.type = AMD_IP_BLOCK_TYPE_PSP,
4218 	.major = 12,
4219 	.minor = 0,
4220 	.rev = 0,
4221 	.funcs = &psp_ip_funcs,
4222 };
4223 
4224 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4225 	.type = AMD_IP_BLOCK_TYPE_PSP,
4226 	.major = 13,
4227 	.minor = 0,
4228 	.rev = 0,
4229 	.funcs = &psp_ip_funcs,
4230 };
4231 
4232 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4233 	.type = AMD_IP_BLOCK_TYPE_PSP,
4234 	.major = 13,
4235 	.minor = 0,
4236 	.rev = 4,
4237 	.funcs = &psp_ip_funcs,
4238 };
4239 
4240 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4241 	.type = AMD_IP_BLOCK_TYPE_PSP,
4242 	.major = 14,
4243 	.minor = 0,
4244 	.rev = 0,
4245 	.funcs = &psp_ip_funcs,
4246 };
4247