xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision b5e161e42e0af7b55d4627aa68922765db2d9367)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 
42 #include "amdgpu_ras.h"
43 #include "amdgpu_securedisplay.h"
44 #include "amdgpu_atomfirmware.h"
45 
46 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*3)
47 
48 static int psp_load_smu_fw(struct psp_context *psp);
49 static int psp_rap_terminate(struct psp_context *psp);
50 static int psp_securedisplay_terminate(struct psp_context *psp);
51 
52 static int psp_ring_init(struct psp_context *psp,
53 			 enum psp_ring_type ring_type)
54 {
55 	int ret = 0;
56 	struct psp_ring *ring;
57 	struct amdgpu_device *adev = psp->adev;
58 
59 	ring = &psp->km_ring;
60 
61 	ring->ring_type = ring_type;
62 
63 	/* allocate a 4K page of local frame buffer memory for the ring */
64 	ring->ring_size = 0x1000;
65 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
66 				      AMDGPU_GEM_DOMAIN_VRAM |
67 				      AMDGPU_GEM_DOMAIN_GTT,
68 				      &adev->firmware.rbuf,
69 				      &ring->ring_mem_mc_addr,
70 				      (void **)&ring->ring_mem);
71 	if (ret) {
72 		ring->ring_size = 0;
73 		return ret;
74 	}
75 
76 	return 0;
77 }
78 
79 /*
80  * Due to DF Cstate management being centralized in the PMFW, the firmware
81  * loading sequence is updated as below:
82  *   - Load KDB
83  *   - Load SYS_DRV
84  *   - Load tOS
85  *   - Load PMFW
86  *   - Setup TMR
87  *   - Load other non-psp fw
88  *   - Load ASD
89  *   - Load XGMI/RAS/HDCP/DTM TA if any
90  *
91  * This new sequence is required for
92  *   - Arcturus and onwards
93  */
94 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
95 {
96 	struct amdgpu_device *adev = psp->adev;
97 
98 	if (amdgpu_sriov_vf(adev)) {
99 		psp->pmfw_centralized_cstate_management = false;
100 		return;
101 	}
102 
103 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
104 	case IP_VERSION(11, 0, 0):
105 	case IP_VERSION(11, 0, 4):
106 	case IP_VERSION(11, 0, 5):
107 	case IP_VERSION(11, 0, 7):
108 	case IP_VERSION(11, 0, 9):
109 	case IP_VERSION(11, 0, 11):
110 	case IP_VERSION(11, 0, 12):
111 	case IP_VERSION(11, 0, 13):
112 	case IP_VERSION(13, 0, 0):
113 	case IP_VERSION(13, 0, 2):
114 	case IP_VERSION(13, 0, 7):
115 		psp->pmfw_centralized_cstate_management = true;
116 		break;
117 	default:
118 		psp->pmfw_centralized_cstate_management = false;
119 		break;
120 	}
121 }
122 
123 static int psp_init_sriov_microcode(struct psp_context *psp)
124 {
125 	struct amdgpu_device *adev = psp->adev;
126 	char ucode_prefix[30];
127 	int ret = 0;
128 
129 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
130 
131 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
132 	case IP_VERSION(9, 0, 0):
133 	case IP_VERSION(11, 0, 7):
134 	case IP_VERSION(11, 0, 9):
135 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
136 		ret = psp_init_cap_microcode(psp, ucode_prefix);
137 		break;
138 	case IP_VERSION(13, 0, 2):
139 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
140 		ret = psp_init_cap_microcode(psp, ucode_prefix);
141 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
142 		break;
143 	case IP_VERSION(13, 0, 0):
144 		adev->virt.autoload_ucode_id = 0;
145 		break;
146 	case IP_VERSION(13, 0, 6):
147 		ret = psp_init_cap_microcode(psp, ucode_prefix);
148 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
149 		break;
150 	case IP_VERSION(13, 0, 10):
151 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
152 		ret = psp_init_cap_microcode(psp, ucode_prefix);
153 		break;
154 	default:
155 		return -EINVAL;
156 	}
157 	return ret;
158 }
159 
160 static int psp_early_init(void *handle)
161 {
162 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
163 	struct psp_context *psp = &adev->psp;
164 
165 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
166 	case IP_VERSION(9, 0, 0):
167 		psp_v3_1_set_psp_funcs(psp);
168 		psp->autoload_supported = false;
169 		break;
170 	case IP_VERSION(10, 0, 0):
171 	case IP_VERSION(10, 0, 1):
172 		psp_v10_0_set_psp_funcs(psp);
173 		psp->autoload_supported = false;
174 		break;
175 	case IP_VERSION(11, 0, 2):
176 	case IP_VERSION(11, 0, 4):
177 		psp_v11_0_set_psp_funcs(psp);
178 		psp->autoload_supported = false;
179 		break;
180 	case IP_VERSION(11, 0, 0):
181 	case IP_VERSION(11, 0, 7):
182 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
183 		fallthrough;
184 	case IP_VERSION(11, 0, 5):
185 	case IP_VERSION(11, 0, 9):
186 	case IP_VERSION(11, 0, 11):
187 	case IP_VERSION(11, 5, 0):
188 	case IP_VERSION(11, 0, 12):
189 	case IP_VERSION(11, 0, 13):
190 		psp_v11_0_set_psp_funcs(psp);
191 		psp->autoload_supported = true;
192 		break;
193 	case IP_VERSION(11, 0, 3):
194 	case IP_VERSION(12, 0, 1):
195 		psp_v12_0_set_psp_funcs(psp);
196 		break;
197 	case IP_VERSION(13, 0, 2):
198 	case IP_VERSION(13, 0, 6):
199 		psp_v13_0_set_psp_funcs(psp);
200 		break;
201 	case IP_VERSION(13, 0, 1):
202 	case IP_VERSION(13, 0, 3):
203 	case IP_VERSION(13, 0, 5):
204 	case IP_VERSION(13, 0, 8):
205 	case IP_VERSION(13, 0, 11):
206 	case IP_VERSION(14, 0, 0):
207 		psp_v13_0_set_psp_funcs(psp);
208 		psp->autoload_supported = true;
209 		break;
210 	case IP_VERSION(11, 0, 8):
211 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
212 			psp_v11_0_8_set_psp_funcs(psp);
213 			psp->autoload_supported = false;
214 		}
215 		break;
216 	case IP_VERSION(13, 0, 0):
217 	case IP_VERSION(13, 0, 7):
218 	case IP_VERSION(13, 0, 10):
219 		psp_v13_0_set_psp_funcs(psp);
220 		psp->autoload_supported = true;
221 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
222 		break;
223 	case IP_VERSION(13, 0, 4):
224 		psp_v13_0_4_set_psp_funcs(psp);
225 		psp->autoload_supported = true;
226 		break;
227 	default:
228 		return -EINVAL;
229 	}
230 
231 	psp->adev = adev;
232 
233 	psp_check_pmfw_centralized_cstate_management(psp);
234 
235 	if (amdgpu_sriov_vf(adev))
236 		return psp_init_sriov_microcode(psp);
237 	else
238 		return psp_init_microcode(psp);
239 }
240 
241 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
242 {
243 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
244 			      &mem_ctx->shared_buf);
245 	mem_ctx->shared_bo = NULL;
246 }
247 
248 static void psp_free_shared_bufs(struct psp_context *psp)
249 {
250 	void *tmr_buf;
251 	void **pptr;
252 
253 	/* free TMR memory buffer */
254 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
255 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
256 	psp->tmr_bo = NULL;
257 
258 	/* free xgmi shared memory */
259 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
260 
261 	/* free ras shared memory */
262 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
263 
264 	/* free hdcp shared memory */
265 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
266 
267 	/* free dtm shared memory */
268 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
269 
270 	/* free rap shared memory */
271 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
272 
273 	/* free securedisplay shared memory */
274 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
275 
276 
277 }
278 
279 static void psp_memory_training_fini(struct psp_context *psp)
280 {
281 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
282 
283 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
284 	kfree(ctx->sys_cache);
285 	ctx->sys_cache = NULL;
286 }
287 
288 static int psp_memory_training_init(struct psp_context *psp)
289 {
290 	int ret;
291 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
292 
293 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
294 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
295 		return 0;
296 	}
297 
298 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
299 	if (ctx->sys_cache == NULL) {
300 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
301 		ret = -ENOMEM;
302 		goto Err_out;
303 	}
304 
305 	dev_dbg(psp->adev->dev,
306 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
307 		ctx->train_data_size,
308 		ctx->p2c_train_data_offset,
309 		ctx->c2p_train_data_offset);
310 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
311 	return 0;
312 
313 Err_out:
314 	psp_memory_training_fini(psp);
315 	return ret;
316 }
317 
318 /*
319  * Helper function to query a psp runtime database entry
320  *
321  * @adev: amdgpu_device pointer
322  * @entry_type: the type of psp runtime database entry
323  * @db_entry: runtime database entry pointer
324  *
325  * Return false if the runtime database doesn't exist or the entry is invalid,
326  * or true if the specific database entry is found and copied to @db_entry
327  */
328 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
329 				     enum psp_runtime_entry_type entry_type,
330 				     void *db_entry)
331 {
332 	uint64_t db_header_pos, db_dir_pos;
333 	struct psp_runtime_data_header db_header = {0};
334 	struct psp_runtime_data_directory db_dir = {0};
335 	bool ret = false;
336 	int i;
337 
338 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
339 		return false;
340 
341 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
342 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
343 
344 	/* read runtime db header from vram */
345 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
346 			sizeof(struct psp_runtime_data_header), false);
347 
348 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
349 		/* runtime db doesn't exist, exit */
350 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
351 		return false;
352 	}
353 
354 	/* read runtime database entry from vram */
355 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
356 			sizeof(struct psp_runtime_data_directory), false);
357 
358 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
359 		/* invalid db entry count, exit */
360 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
361 		return false;
362 	}
363 
364 	/* look up for requested entry type */
365 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
366 		if (db_dir.entry_list[i].entry_type == entry_type) {
367 			switch (entry_type) {
368 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
369 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
370 					/* invalid db entry size */
371 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
372 					return false;
373 				}
374 				/* read runtime database entry */
375 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
376 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
377 				ret = true;
378 				break;
379 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
380 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
381 					/* invalid db entry size */
382 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
383 					return false;
384 				}
385 				/* read runtime database entry */
386 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
387 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
388 				ret = true;
389 				break;
390 			default:
391 				ret = false;
392 				break;
393 			}
394 		}
395 	}
396 
397 	return ret;
398 }
399 
400 static int psp_sw_init(void *handle)
401 {
402 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
403 	struct psp_context *psp = &adev->psp;
404 	int ret;
405 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
406 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
407 	struct psp_runtime_scpm_entry scpm_entry;
408 
409 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
410 	if (!psp->cmd) {
411 		dev_err(adev->dev, "Failed to allocate memory for command buffer!\n");
412 		return -ENOMEM;
413 	}
414 
415 	adev->psp.xgmi_context.supports_extended_data =
416 		!adev->gmc.xgmi.connected_to_cpu &&
417 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
418 
419 	memset(&scpm_entry, 0, sizeof(scpm_entry));
420 	if ((psp_get_runtime_db_entry(adev,
421 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
422 				&scpm_entry)) &&
423 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
424 		adev->scpm_enabled = true;
425 		adev->scpm_status = scpm_entry.scpm_status;
426 	} else {
427 		adev->scpm_enabled = false;
428 		adev->scpm_status = SCPM_DISABLE;
429 	}
430 
431 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
432 
433 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
434 	if (psp_get_runtime_db_entry(adev,
435 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
436 				&boot_cfg_entry)) {
437 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
438 		if ((psp->boot_cfg_bitmask) &
439 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
440 			/* If psp runtime database exists, then
441 			 * only enable two stage memory training
442 			 * when TWO_STAGE_DRAM_TRAINING bit is set
443 			 * in runtime database
444 			 */
445 			mem_training_ctx->enable_mem_training = true;
446 		}
447 
448 	} else {
449 		/* If psp runtime database doesn't exist or is
450 		 * invalid, force enable two stage memory training
451 		 */
452 		mem_training_ctx->enable_mem_training = true;
453 	}
454 
455 	if (mem_training_ctx->enable_mem_training) {
456 		ret = psp_memory_training_init(psp);
457 		if (ret) {
458 			dev_err(adev->dev, "Failed to initialize memory training!\n");
459 			return ret;
460 		}
461 
462 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
463 		if (ret) {
464 			dev_err(adev->dev, "Failed to process memory training!\n");
465 			return ret;
466 		}
467 	}
468 
469 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
470 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
471 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
472 				      &psp->fw_pri_bo,
473 				      &psp->fw_pri_mc_addr,
474 				      &psp->fw_pri_buf);
475 	if (ret)
476 		return ret;
477 
478 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
479 				      AMDGPU_GEM_DOMAIN_VRAM |
480 				      AMDGPU_GEM_DOMAIN_GTT,
481 				      &psp->fence_buf_bo,
482 				      &psp->fence_buf_mc_addr,
483 				      &psp->fence_buf);
484 	if (ret)
485 		goto failed1;
486 
487 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
488 				      AMDGPU_GEM_DOMAIN_VRAM |
489 				      AMDGPU_GEM_DOMAIN_GTT,
490 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
491 				      (void **)&psp->cmd_buf_mem);
492 	if (ret)
493 		goto failed2;
494 
495 	return 0;
496 
497 failed2:
498 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
499 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
500 failed1:
501 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
502 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
503 	return ret;
504 }
505 
506 static int psp_sw_fini(void *handle)
507 {
508 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
509 	struct psp_context *psp = &adev->psp;
510 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
511 
512 	psp_memory_training_fini(psp);
513 
514 	amdgpu_ucode_release(&psp->sos_fw);
515 	amdgpu_ucode_release(&psp->asd_fw);
516 	amdgpu_ucode_release(&psp->ta_fw);
517 	amdgpu_ucode_release(&psp->cap_fw);
518 	amdgpu_ucode_release(&psp->toc_fw);
519 
520 	kfree(cmd);
521 	cmd = NULL;
522 
523 	psp_free_shared_bufs(psp);
524 
525 	if (psp->km_ring.ring_mem)
526 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
527 				      &psp->km_ring.ring_mem_mc_addr,
528 				      (void **)&psp->km_ring.ring_mem);
529 
530 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
531 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
532 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
533 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
534 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
535 			      (void **)&psp->cmd_buf_mem);
536 
537 	return 0;
538 }
539 
540 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
541 		 uint32_t reg_val, uint32_t mask, bool check_changed)
542 {
543 	uint32_t val;
544 	int i;
545 	struct amdgpu_device *adev = psp->adev;
546 
547 	if (psp->adev->no_hw_access)
548 		return 0;
549 
550 	for (i = 0; i < adev->usec_timeout; i++) {
551 		val = RREG32(reg_index);
552 		if (check_changed) {
553 			if (val != reg_val)
554 				return 0;
555 		} else {
556 			if ((val & mask) == reg_val)
557 				return 0;
558 		}
559 		udelay(1);
560 	}
561 
562 	return -ETIME;
563 }
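
/*
 * A minimal usage sketch of psp_wait_for() (illustrative only): the
 * register index and the ready bit below are placeholders rather than
 * values taken from this file; the real callers live in the per-ASIC
 * psp_v*.c backends.
 */
static int example_poll_psp_ready(struct psp_context *psp, uint32_t reg_index)
{
	/*
	 * With check_changed == false the wait succeeds once
	 * (RREG32(reg_index) & mask) == reg_val; with check_changed == true
	 * it succeeds once the register value differs from reg_val.
	 */
	return psp_wait_for(psp, reg_index, 0x80000000, 0x80000000, false);
}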
564 
565 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
566 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
567 {
568 	uint32_t val;
569 	int i;
570 	struct amdgpu_device *adev = psp->adev;
571 
572 	if (psp->adev->no_hw_access)
573 		return 0;
574 
575 	for (i = 0; i < msec_timeout; i++) {
576 		val = RREG32(reg_index);
577 		if ((val & mask) == reg_val)
578 			return 0;
579 		msleep(1);
580 	}
581 
582 	return -ETIME;
583 }
584 
585 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
586 {
587 	switch (cmd_id) {
588 	case GFX_CMD_ID_LOAD_TA:
589 		return "LOAD_TA";
590 	case GFX_CMD_ID_UNLOAD_TA:
591 		return "UNLOAD_TA";
592 	case GFX_CMD_ID_INVOKE_CMD:
593 		return "INVOKE_CMD";
594 	case GFX_CMD_ID_LOAD_ASD:
595 		return "LOAD_ASD";
596 	case GFX_CMD_ID_SETUP_TMR:
597 		return "SETUP_TMR";
598 	case GFX_CMD_ID_LOAD_IP_FW:
599 		return "LOAD_IP_FW";
600 	case GFX_CMD_ID_DESTROY_TMR:
601 		return "DESTROY_TMR";
602 	case GFX_CMD_ID_SAVE_RESTORE:
603 		return "SAVE_RESTORE_IP_FW";
604 	case GFX_CMD_ID_SETUP_VMR:
605 		return "SETUP_VMR";
606 	case GFX_CMD_ID_DESTROY_VMR:
607 		return "DESTROY_VMR";
608 	case GFX_CMD_ID_PROG_REG:
609 		return "PROG_REG";
610 	case GFX_CMD_ID_GET_FW_ATTESTATION:
611 		return "GET_FW_ATTESTATION";
612 	case GFX_CMD_ID_LOAD_TOC:
613 		return "ID_LOAD_TOC";
614 	case GFX_CMD_ID_AUTOLOAD_RLC:
615 		return "AUTOLOAD_RLC";
616 	case GFX_CMD_ID_BOOT_CFG:
617 		return "BOOT_CFG";
618 	default:
619 		return "UNKNOWN CMD";
620 	}
621 }
622 
623 static int
624 psp_cmd_submit_buf(struct psp_context *psp,
625 		   struct amdgpu_firmware_info *ucode,
626 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
627 {
628 	int ret;
629 	int index;
630 	int timeout = 20000;
631 	bool ras_intr = false;
632 	bool skip_unsupport = false;
633 
634 	if (psp->adev->no_hw_access)
635 		return 0;
636 
637 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
638 
639 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
640 
641 	index = atomic_inc_return(&psp->fence_value);
642 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
643 	if (ret) {
644 		atomic_dec(&psp->fence_value);
645 		goto exit;
646 	}
647 
648 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
649 	while (*((unsigned int *)psp->fence_buf) != index) {
650 		if (--timeout == 0)
651 			break;
652 		/*
653 		 * Shouldn't wait for the timeout when err_event_athub occurs,
654 		 * because the gpu reset thread has been triggered and the lock
655 		 * resource should be released for the psp resume sequence.
656 		 */
657 		ras_intr = amdgpu_ras_intr_triggered();
658 		if (ras_intr)
659 			break;
660 		usleep_range(10, 100);
661 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
662 	}
663 
664 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
665 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
666 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
667 
668 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
669 
670 	/* In some cases, the psp response status is not 0 even though there
671 	 * is no problem while the command is submitted. Some versions of the
672 	 * PSP FW don't write 0 to that field.
673 	 * So here we only print a warning instead of an error during psp
674 	 * initialization, to avoid breaking hw_init, and we don't return
675 	 * -EINVAL.
676 	 */
677 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
678 		if (ucode)
679 			dev_warn(psp->adev->dev,
680 				 "failed to load ucode %s(0x%X) ",
681 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
682 		dev_warn(psp->adev->dev,
683 			 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
684 			 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
685 			 psp->cmd_buf_mem->resp.status);
686 		/* If any firmware (including CAP) load fails under SRIOV, it should
687 		 * return failure to stop the VF from initializing.
688 		 * Also return failure in case of timeout
689 		 */
690 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
691 			ret = -EINVAL;
692 			goto exit;
693 		}
694 	}
695 
696 	if (ucode) {
697 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
698 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
699 	}
700 
701 exit:
702 	return ret;
703 }
704 
705 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
706 {
707 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
708 
709 	mutex_lock(&psp->mutex);
710 
711 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
712 
713 	return cmd;
714 }
715 
716 static void release_psp_cmd_buf(struct psp_context *psp)
717 {
718 	mutex_unlock(&psp->mutex);
719 }
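
/*
 * Illustrative sketch of the command-buffer pattern used throughout this
 * file (the command id is just an example): acquire the shared command
 * buffer under psp->mutex, fill in the request, submit it against the
 * fence buffer, then release the mutex.
 */
static int example_submit_simple_cmd(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
	int ret;

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;	/* example command id */

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}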
720 
721 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
722 				 struct psp_gfx_cmd_resp *cmd,
723 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
724 {
725 	struct amdgpu_device *adev = psp->adev;
726 	uint32_t size = 0;
727 	uint64_t tmr_pa = 0;
728 
729 	if (tmr_bo) {
730 		size = amdgpu_bo_size(tmr_bo);
731 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
732 	}
733 
734 	if (amdgpu_sriov_vf(psp->adev))
735 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
736 	else
737 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
738 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
739 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
740 	cmd->cmd.cmd_setup_tmr.buf_size = size;
741 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
742 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
743 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
744 }
745 
746 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
747 				      uint64_t pri_buf_mc, uint32_t size)
748 {
749 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
750 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
751 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
752 	cmd->cmd.cmd_load_toc.toc_size = size;
753 }
754 
755 /* Issue the LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
756 static int psp_load_toc(struct psp_context *psp,
757 			uint32_t *tmr_size)
758 {
759 	int ret;
760 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
761 
762 	/* Copy toc to psp firmware private buffer */
763 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
764 
765 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
766 
767 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
768 				 psp->fence_buf_mc_addr);
769 	if (!ret)
770 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
771 
772 	release_psp_cmd_buf(psp);
773 
774 	return ret;
775 }
776 
777 static bool psp_boottime_tmr(struct psp_context *psp)
778 {
779 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
780 	case IP_VERSION(13, 0, 6):
781 		return true;
782 	default:
783 		return false;
784 	}
785 }
786 
787 /* Set up Trusted Memory Region */
788 static int psp_tmr_init(struct psp_context *psp)
789 {
790 	int ret = 0;
791 	int tmr_size;
792 	void *tmr_buf;
793 	void **pptr;
794 
795 	/*
796 	 * According to the HW engineers, the TMR address should be "naturally
797 	 * aligned", i.e. the start address should be an integer multiple of the
798 	 * TMR size.
799 	 *
800 	 * Note: this memory needs to be reserved until the driver uninitializes.
801 	 */
802 	tmr_size = PSP_TMR_SIZE(psp->adev);
803 
804 	/* For ASICs that support RLC autoload, psp will parse the toc
805 	 * and calculate the total size of TMR needed
806 	 */
807 	if (!amdgpu_sriov_vf(psp->adev) &&
808 	    psp->toc.start_addr &&
809 	    psp->toc.size_bytes &&
810 	    psp->fw_pri_buf) {
811 		ret = psp_load_toc(psp, &tmr_size);
812 		if (ret) {
813 			dev_err(psp->adev->dev, "Failed to load toc\n");
814 			return ret;
815 		}
816 	}
817 
818 	if (!psp->tmr_bo) {
819 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
820 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
821 					      PSP_TMR_ALIGNMENT,
822 					      AMDGPU_HAS_VRAM(psp->adev) ?
823 					      AMDGPU_GEM_DOMAIN_VRAM :
824 					      AMDGPU_GEM_DOMAIN_GTT,
825 					      &psp->tmr_bo, &psp->tmr_mc_addr,
826 					      pptr);
827 	}
828 
829 	return ret;
830 }
831 
832 static bool psp_skip_tmr(struct psp_context *psp)
833 {
834 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
835 	case IP_VERSION(11, 0, 9):
836 	case IP_VERSION(11, 0, 7):
837 	case IP_VERSION(13, 0, 2):
838 	case IP_VERSION(13, 0, 6):
839 	case IP_VERSION(13, 0, 10):
840 		return true;
841 	default:
842 		return false;
843 	}
844 }
845 
846 static int psp_tmr_load(struct psp_context *psp)
847 {
848 	int ret;
849 	struct psp_gfx_cmd_resp *cmd;
850 
851 	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
852 	 * Already set up by host driver.
853 	 */
854 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
855 		return 0;
856 
857 	cmd = acquire_psp_cmd_buf(psp);
858 
859 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
860 	if (psp->tmr_bo)
861 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
862 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
863 
864 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
865 				 psp->fence_buf_mc_addr);
866 
867 	release_psp_cmd_buf(psp);
868 
869 	return ret;
870 }
871 
872 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
873 					struct psp_gfx_cmd_resp *cmd)
874 {
875 	if (amdgpu_sriov_vf(psp->adev))
876 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
877 	else
878 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
879 }
880 
881 static int psp_tmr_unload(struct psp_context *psp)
882 {
883 	int ret;
884 	struct psp_gfx_cmd_resp *cmd;
885 
886 	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
887 	 * as TMR is not loaded at all
888 	 */
889 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
890 		return 0;
891 
892 	cmd = acquire_psp_cmd_buf(psp);
893 
894 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
895 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
896 
897 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
898 				 psp->fence_buf_mc_addr);
899 
900 	release_psp_cmd_buf(psp);
901 
902 	return ret;
903 }
904 
905 static int psp_tmr_terminate(struct psp_context *psp)
906 {
907 	return psp_tmr_unload(psp);
908 }
909 
910 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
911 					uint64_t *output_ptr)
912 {
913 	int ret;
914 	struct psp_gfx_cmd_resp *cmd;
915 
916 	if (!output_ptr)
917 		return -EINVAL;
918 
919 	if (amdgpu_sriov_vf(psp->adev))
920 		return 0;
921 
922 	cmd = acquire_psp_cmd_buf(psp);
923 
924 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
925 
926 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
927 				 psp->fence_buf_mc_addr);
928 
929 	if (!ret) {
930 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
931 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
932 	}
933 
934 	release_psp_cmd_buf(psp);
935 
936 	return ret;
937 }
938 
939 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
940 {
941 	struct psp_context *psp = &adev->psp;
942 	struct psp_gfx_cmd_resp *cmd;
943 	int ret;
944 
945 	if (amdgpu_sriov_vf(adev))
946 		return 0;
947 
948 	cmd = acquire_psp_cmd_buf(psp);
949 
950 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
951 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
952 
953 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
954 	if (!ret) {
955 		*boot_cfg =
956 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
957 	}
958 
959 	release_psp_cmd_buf(psp);
960 
961 	return ret;
962 }
963 
964 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
965 {
966 	int ret;
967 	struct psp_context *psp = &adev->psp;
968 	struct psp_gfx_cmd_resp *cmd;
969 
970 	if (amdgpu_sriov_vf(adev))
971 		return 0;
972 
973 	cmd = acquire_psp_cmd_buf(psp);
974 
975 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
976 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
977 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
978 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
979 
980 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
981 
982 	release_psp_cmd_buf(psp);
983 
984 	return ret;
985 }
986 
987 static int psp_rl_load(struct amdgpu_device *adev)
988 {
989 	int ret;
990 	struct psp_context *psp = &adev->psp;
991 	struct psp_gfx_cmd_resp *cmd;
992 
993 	if (!is_psp_fw_valid(psp->rl))
994 		return 0;
995 
996 	cmd = acquire_psp_cmd_buf(psp);
997 
998 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
999 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1000 
1001 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1002 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1003 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1004 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1005 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1006 
1007 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1008 
1009 	release_psp_cmd_buf(psp);
1010 
1011 	return ret;
1012 }
1013 
1014 int psp_spatial_partition(struct psp_context *psp, int mode)
1015 {
1016 	struct psp_gfx_cmd_resp *cmd;
1017 	int ret;
1018 
1019 	if (amdgpu_sriov_vf(psp->adev))
1020 		return 0;
1021 
1022 	cmd = acquire_psp_cmd_buf(psp);
1023 
1024 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1025 	cmd->cmd.cmd_spatial_part.mode = mode;
1026 
1027 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1028 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1029 
1030 	release_psp_cmd_buf(psp);
1031 
1032 	return ret;
1033 }
1034 
1035 static int psp_asd_initialize(struct psp_context *psp)
1036 {
1037 	int ret;
1038 
1039 	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
1040 	 * Add a workaround to bypass it for sriov for now.
1041 	 * TODO: add a version check to make it common
1042 	 */
1043 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1044 		return 0;
1045 
1046 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1047 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1048 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1049 
1050 	ret = psp_ta_load(psp, &psp->asd_context);
1051 	if (!ret)
1052 		psp->asd_context.initialized = true;
1053 
1054 	return ret;
1055 }
1056 
1057 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1058 				       uint32_t session_id)
1059 {
1060 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1061 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1062 }
1063 
1064 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1065 {
1066 	int ret;
1067 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1068 
1069 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1070 
1071 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1072 
1073 	context->resp_status = cmd->resp.status;
1074 
1075 	release_psp_cmd_buf(psp);
1076 
1077 	return ret;
1078 }
1079 
1080 static int psp_asd_terminate(struct psp_context *psp)
1081 {
1082 	int ret;
1083 
1084 	if (amdgpu_sriov_vf(psp->adev))
1085 		return 0;
1086 
1087 	if (!psp->asd_context.initialized)
1088 		return 0;
1089 
1090 	ret = psp_ta_unload(psp, &psp->asd_context);
1091 	if (!ret)
1092 		psp->asd_context.initialized = false;
1093 
1094 	return ret;
1095 }
1096 
1097 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1098 		uint32_t id, uint32_t value)
1099 {
1100 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1101 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1102 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1103 }
1104 
1105 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1106 		uint32_t value)
1107 {
1108 	struct psp_gfx_cmd_resp *cmd;
1109 	int ret = 0;
1110 
1111 	if (reg >= PSP_REG_LAST)
1112 		return -EINVAL;
1113 
1114 	cmd = acquire_psp_cmd_buf(psp);
1115 
1116 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1117 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1118 	if (ret)
1119 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1120 
1121 	release_psp_cmd_buf(psp);
1122 
1123 	return ret;
1124 }
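
/*
 * Illustrative only: programming an IH ring control register through PSP,
 * assuming PSP_REG_IH_RB_CNTL from enum psp_reg_prog_id; the value passed
 * in is a placeholder chosen by the caller.
 */
static int example_program_ih_reg(struct psp_context *psp, uint32_t value)
{
	return psp_reg_program(psp, PSP_REG_IH_RB_CNTL, value);
}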
1125 
1126 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1127 				     uint64_t ta_bin_mc,
1128 				     struct ta_context *context)
1129 {
1130 	cmd->cmd_id				= context->ta_load_type;
1131 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1132 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1133 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1134 
1135 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1136 		lower_32_bits(context->mem_context.shared_mc_addr);
1137 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1138 		upper_32_bits(context->mem_context.shared_mc_addr);
1139 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1140 }
1141 
1142 int psp_ta_init_shared_buf(struct psp_context *psp,
1143 				  struct ta_mem_context *mem_ctx)
1144 {
1145 	/*
1146 	 * Allocate 16k of memory aligned to 4k from the frame buffer (local
1147 	 * physical) for TA-to-host shared memory
1148 	 */
1149 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1150 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1151 				      AMDGPU_GEM_DOMAIN_GTT,
1152 				      &mem_ctx->shared_bo,
1153 				      &mem_ctx->shared_mc_addr,
1154 				      &mem_ctx->shared_buf);
1155 }
1156 
1157 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1158 				       uint32_t ta_cmd_id,
1159 				       uint32_t session_id)
1160 {
1161 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1162 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1163 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1164 }
1165 
1166 int psp_ta_invoke(struct psp_context *psp,
1167 		  uint32_t ta_cmd_id,
1168 		  struct ta_context *context)
1169 {
1170 	int ret;
1171 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1172 
1173 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1174 
1175 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1176 				 psp->fence_buf_mc_addr);
1177 
1178 	context->resp_status = cmd->resp.status;
1179 
1180 	release_psp_cmd_buf(psp);
1181 
1182 	return ret;
1183 }
1184 
1185 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1186 {
1187 	int ret;
1188 	struct psp_gfx_cmd_resp *cmd;
1189 
1190 	cmd = acquire_psp_cmd_buf(psp);
1191 
1192 	psp_copy_fw(psp, context->bin_desc.start_addr,
1193 		    context->bin_desc.size_bytes);
1194 
1195 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1196 
1197 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1198 				 psp->fence_buf_mc_addr);
1199 
1200 	context->resp_status = cmd->resp.status;
1201 
1202 	if (!ret)
1203 		context->session_id = cmd->resp.session_id;
1204 
1205 	release_psp_cmd_buf(psp);
1206 
1207 	return ret;
1208 }
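
/*
 * Illustrative sketch of the generic TA lifecycle built from the helpers
 * above, mirroring what the XGMI/RAS/HDCP wrappers later in this file do.
 * The shared-memory size and the ta_cmd_id are placeholders.
 */
static int example_ta_lifecycle(struct psp_context *psp, struct ta_context *context)
{
	int ret;

	context->mem_context.shared_mem_size = PAGE_SIZE * 4;	/* placeholder size */
	context->ta_load_type = GFX_CMD_ID_LOAD_TA;

	ret = psp_ta_init_shared_buf(psp, &context->mem_context);
	if (ret)
		return ret;

	ret = psp_ta_load(psp, context);
	if (ret)
		goto out_free;

	ret = psp_ta_invoke(psp, 1 /* placeholder ta_cmd_id */, context);

	psp_ta_unload(psp, context);
out_free:
	psp_ta_free_shared_buf(&context->mem_context);
	return ret;
}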
1209 
1210 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1211 {
1212 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1213 }
1214 
1215 int psp_xgmi_terminate(struct psp_context *psp)
1216 {
1217 	int ret;
1218 	struct amdgpu_device *adev = psp->adev;
1219 
1220 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1221 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1222 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1223 	     adev->gmc.xgmi.connected_to_cpu))
1224 		return 0;
1225 
1226 	if (!psp->xgmi_context.context.initialized)
1227 		return 0;
1228 
1229 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1230 
1231 	psp->xgmi_context.context.initialized = false;
1232 
1233 	return ret;
1234 }
1235 
1236 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1237 {
1238 	struct ta_xgmi_shared_memory *xgmi_cmd;
1239 	int ret;
1240 
1241 	if (!psp->ta_fw ||
1242 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1243 	    !psp->xgmi_context.context.bin_desc.start_addr)
1244 		return -ENOENT;
1245 
1246 	if (!load_ta)
1247 		goto invoke;
1248 
1249 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1250 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1251 
1252 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1253 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1254 		if (ret)
1255 			return ret;
1256 	}
1257 
1258 	/* Load XGMI TA */
1259 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1260 	if (!ret)
1261 		psp->xgmi_context.context.initialized = true;
1262 	else
1263 		return ret;
1264 
1265 invoke:
1266 	/* Initialize XGMI session */
1267 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1268 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1269 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1270 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1271 
1272 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1273 	/* note down the capability flag for XGMI TA */
1274 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1275 
1276 	return ret;
1277 }
1278 
1279 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1280 {
1281 	struct ta_xgmi_shared_memory *xgmi_cmd;
1282 	int ret;
1283 
1284 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1285 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1286 
1287 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1288 
1289 	/* Invoke xgmi ta to get hive id */
1290 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1291 	if (ret)
1292 		return ret;
1293 
1294 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1295 
1296 	return 0;
1297 }
1298 
1299 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1300 {
1301 	struct ta_xgmi_shared_memory *xgmi_cmd;
1302 	int ret;
1303 
1304 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1305 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1306 
1307 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1308 
1309 	/* Invoke xgmi ta to get the node id */
1310 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1311 	if (ret)
1312 		return ret;
1313 
1314 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1315 
1316 	return 0;
1317 }
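
/*
 * Illustrative sketch: querying both XGMI identifiers once
 * psp_xgmi_initialize() has succeeded; callers typically store them in
 * adev->gmc.xgmi.
 */
static int example_query_xgmi_ids(struct psp_context *psp,
				  uint64_t *hive_id, uint64_t *node_id)
{
	int ret;

	ret = psp_xgmi_get_hive_id(psp, hive_id);
	if (ret)
		return ret;

	return psp_xgmi_get_node_id(psp, node_id);
}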
1318 
1319 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1320 {
1321 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1322 			IP_VERSION(13, 0, 2) &&
1323 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1324 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1325 		       IP_VERSION(13, 0, 6);
1326 }
1327 
1328 /*
1329  * Chips that support extended topology information require the driver to
1330  * reflect topology information in the opposite direction.  This is
1331  * because the TA has already exceeded its link record limit and if the
1332  * TA holds bi-directional information, the driver would have to do
1333  * multiple fetches instead of just two.
1334  */
1335 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1336 					struct psp_xgmi_node_info node_info)
1337 {
1338 	struct amdgpu_device *mirror_adev;
1339 	struct amdgpu_hive_info *hive;
1340 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1341 	uint64_t dst_node_id = node_info.node_id;
1342 	uint8_t dst_num_hops = node_info.num_hops;
1343 	uint8_t dst_num_links = node_info.num_links;
1344 
1345 	hive = amdgpu_get_xgmi_hive(psp->adev);
1346 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1347 		struct psp_xgmi_topology_info *mirror_top_info;
1348 		int j;
1349 
1350 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1351 			continue;
1352 
1353 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1354 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1355 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1356 				continue;
1357 
1358 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1359 			/*
1360 			 * prevent re-reflection of a 0 num_links value, since the
1361 			 * reflection criteria are based on num_hops (direct or
1362 			 * indirect).
1363 			 */
1364 			if (dst_num_links)
1365 				mirror_top_info->nodes[j].num_links = dst_num_links;
1366 
1367 			break;
1368 		}
1369 
1370 		break;
1371 	}
1372 
1373 	amdgpu_put_xgmi_hive(hive);
1374 }
1375 
1376 int psp_xgmi_get_topology_info(struct psp_context *psp,
1377 			       int number_devices,
1378 			       struct psp_xgmi_topology_info *topology,
1379 			       bool get_extended_data)
1380 {
1381 	struct ta_xgmi_shared_memory *xgmi_cmd;
1382 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1383 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1384 	int i;
1385 	int ret;
1386 
1387 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1388 		return -EINVAL;
1389 
1390 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1391 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1392 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1393 
1394 	/* Fill in the shared memory with topology information as input */
1395 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1396 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1397 	topology_info_input->num_nodes = number_devices;
1398 
1399 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1400 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1401 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1402 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1403 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1404 	}
1405 
1406 	/* Invoke xgmi ta to get the topology information */
1407 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1408 	if (ret)
1409 		return ret;
1410 
1411 	/* Read the output topology information from the shared memory */
1412 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1413 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1414 	for (i = 0; i < topology->num_nodes; i++) {
1415 		/* extended data will either be 0 or equal to non-extended data */
1416 		if (topology_info_output->nodes[i].num_hops)
1417 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1418 
1419 		/* non-extended data gets everything here so no need to update */
1420 		if (!get_extended_data) {
1421 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1422 			topology->nodes[i].is_sharing_enabled =
1423 					topology_info_output->nodes[i].is_sharing_enabled;
1424 			topology->nodes[i].sdma_engine =
1425 					topology_info_output->nodes[i].sdma_engine;
1426 		}
1427 
1428 	}
1429 
1430 	/* Invoke xgmi ta again to get the link information */
1431 	if (psp_xgmi_peer_link_info_supported(psp)) {
1432 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1433 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1434 		bool requires_reflection =
1435 			(psp->xgmi_context.supports_extended_data &&
1436 			 get_extended_data) ||
1437 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1438 				IP_VERSION(13, 0, 6);
1439 		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1440 				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1441 
1442 		/* populate the shared output buffer rather than the cmd input buffer
1443 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1444 		 * This is required for GET_PEER_LINKS per the xgmi ta implementation.
1445 		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
1446 		 */
1447 		if (ta_port_num_support) {
1448 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1449 
1450 			for (i = 0; i < topology->num_nodes; i++)
1451 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1452 
1453 			link_extend_info_output->num_nodes = topology->num_nodes;
1454 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1455 		} else {
1456 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1457 
1458 			for (i = 0; i < topology->num_nodes; i++)
1459 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1460 
1461 			link_info_output->num_nodes = topology->num_nodes;
1462 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1463 		}
1464 
1465 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1466 		if (ret)
1467 			return ret;
1468 
1469 		for (i = 0; i < topology->num_nodes; i++) {
1470 			uint8_t node_num_links = ta_port_num_support ?
1471 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1472 			/* accumulate num_links on extended data */
1473 			if (get_extended_data) {
1474 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1475 			} else {
1476 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1477 								topology->nodes[i].num_links : node_num_links;
1478 			}
1479 			/* populate the connected port num info if supported and available */
1480 			if (ta_port_num_support && topology->nodes[i].num_links) {
1481 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1482 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1483 			}
1484 
1485 			/* reflect the topology information for bi-directionality */
1486 			if (requires_reflection && topology->nodes[i].num_hops)
1487 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1488 		}
1489 	}
1490 
1491 	return 0;
1492 }
1493 
1494 int psp_xgmi_set_topology_info(struct psp_context *psp,
1495 			       int number_devices,
1496 			       struct psp_xgmi_topology_info *topology)
1497 {
1498 	struct ta_xgmi_shared_memory *xgmi_cmd;
1499 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1500 	int i;
1501 
1502 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1503 		return -EINVAL;
1504 
1505 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1506 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1507 
1508 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1509 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1510 	topology_info_input->num_nodes = number_devices;
1511 
1512 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1513 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1514 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1515 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1516 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1517 	}
1518 
1519 	/* Invoke xgmi ta to set topology information */
1520 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1521 }
1522 
1523 // ras begin
1524 static void psp_ras_ta_check_status(struct psp_context *psp)
1525 {
1526 	struct ta_ras_shared_memory *ras_cmd =
1527 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1528 
1529 	switch (ras_cmd->ras_status) {
1530 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1531 		dev_warn(psp->adev->dev,
1532 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1533 		break;
1534 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1535 		dev_warn(psp->adev->dev,
1536 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1537 		break;
1538 	case TA_RAS_STATUS__SUCCESS:
1539 		break;
1540 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1541 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1542 			dev_warn(psp->adev->dev,
1543 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1544 		break;
1545 	default:
1546 		dev_warn(psp->adev->dev,
1547 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1548 		break;
1549 	}
1550 }
1551 
1552 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1553 {
1554 	struct ta_ras_shared_memory *ras_cmd;
1555 	int ret;
1556 
1557 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1558 
1559 	/*
1560 	 * TODO: bypass the loading in sriov for now
1561 	 */
1562 	if (amdgpu_sriov_vf(psp->adev))
1563 		return 0;
1564 
1565 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1566 
1567 	if (amdgpu_ras_intr_triggered())
1568 		return ret;
1569 
1570 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1571 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1572 		return -EINVAL;
1573 	}
1574 
1575 	if (!ret) {
1576 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1577 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1578 
1579 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1580 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1581 			dev_warn(psp->adev->dev,
1582 				 "RAS internal register access blocked\n");
1583 
1584 		psp_ras_ta_check_status(psp);
1585 	}
1586 
1587 	return ret;
1588 }
1589 
1590 int psp_ras_enable_features(struct psp_context *psp,
1591 		union ta_ras_cmd_input *info, bool enable)
1592 {
1593 	struct ta_ras_shared_memory *ras_cmd;
1594 	int ret;
1595 
1596 	if (!psp->ras_context.context.initialized)
1597 		return -EINVAL;
1598 
1599 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1600 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1601 
1602 	if (enable)
1603 		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1604 	else
1605 		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1606 
1607 	ras_cmd->ras_in_message = *info;
1608 
1609 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1610 	if (ret)
1611 		return -EINVAL;
1612 
1613 	return 0;
1614 }
1615 
1616 int psp_ras_terminate(struct psp_context *psp)
1617 {
1618 	int ret;
1619 
1620 	/*
1621 	 * TODO: bypass the terminate in sriov for now
1622 	 */
1623 	if (amdgpu_sriov_vf(psp->adev))
1624 		return 0;
1625 
1626 	if (!psp->ras_context.context.initialized)
1627 		return 0;
1628 
1629 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1630 
1631 	psp->ras_context.context.initialized = false;
1632 
1633 	return ret;
1634 }
1635 
1636 int psp_ras_initialize(struct psp_context *psp)
1637 {
1638 	int ret;
1639 	uint32_t boot_cfg = 0xFF;
1640 	struct amdgpu_device *adev = psp->adev;
1641 	struct ta_ras_shared_memory *ras_cmd;
1642 
1643 	/*
1644 	 * TODO: bypass the initialize in sriov for now
1645 	 */
1646 	if (amdgpu_sriov_vf(adev))
1647 		return 0;
1648 
1649 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1650 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1651 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1652 		return 0;
1653 	}
1654 
1655 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1656 		/* query GECC enablement status from boot config
1657 		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1658 		 */
1659 		ret = psp_boot_config_get(adev, &boot_cfg);
1660 		if (ret)
1661 			dev_warn(adev->dev, "PSP get boot config failed\n");
1662 
1663 		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1664 			if (!boot_cfg) {
1665 				dev_info(adev->dev, "GECC is disabled\n");
1666 			} else {
1667 				/* disable GECC in next boot cycle if ras is
1668 				 * disabled by module parameter amdgpu_ras_enable
1669 				 * and/or amdgpu_ras_mask, or the boot_config_get call
1670 				 * failed
1671 				 */
1672 				ret = psp_boot_config_set(adev, 0);
1673 				if (ret)
1674 					dev_warn(adev->dev, "PSP set boot config failed\n");
1675 				else
1676 					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1677 			}
1678 		} else {
1679 			if (boot_cfg == 1) {
1680 				dev_info(adev->dev, "GECC is enabled\n");
1681 			} else {
1682 				/* enable GECC in next boot cycle if it is disabled
1683 				 * in boot config, or force enable GECC if failed to
1684 				 * get boot configuration
1685 				 */
1686 				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1687 				if (ret)
1688 					dev_warn(adev->dev, "PSP set boot config failed\n");
1689 				else
1690 					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1691 			}
1692 		}
1693 	}
1694 
1695 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1696 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1697 
1698 	if (!psp->ras_context.context.mem_context.shared_buf) {
1699 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1700 		if (ret)
1701 			return ret;
1702 	}
1703 
1704 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1705 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1706 
1707 	if (amdgpu_ras_is_poison_mode_supported(adev))
1708 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1709 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1710 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1711 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1712 		adev->gfx.xcc_mask;
1713 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1714 
1715 	ret = psp_ta_load(psp, &psp->ras_context.context);
1716 
1717 	if (!ret && !ras_cmd->ras_status)
1718 		psp->ras_context.context.initialized = true;
1719 	else {
1720 		if (ras_cmd->ras_status)
1721 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1722 
1723 		/* fail to load RAS TA */
1724 		psp->ras_context.context.initialized = false;
1725 	}
1726 
1727 	return ret;
1728 }
1729 
1730 int psp_ras_trigger_error(struct psp_context *psp,
1731 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1732 {
1733 	struct ta_ras_shared_memory *ras_cmd;
1734 	struct amdgpu_device *adev = psp->adev;
1735 	int ret;
1736 	uint32_t dev_mask;
1737 
1738 	if (!psp->ras_context.context.initialized)
1739 		return -EINVAL;
1740 
1741 	switch (info->block_id) {
1742 	case TA_RAS_BLOCK__GFX:
1743 		dev_mask = GET_MASK(GC, instance_mask);
1744 		break;
1745 	case TA_RAS_BLOCK__SDMA:
1746 		dev_mask = GET_MASK(SDMA0, instance_mask);
1747 		break;
1748 	case TA_RAS_BLOCK__VCN:
1749 	case TA_RAS_BLOCK__JPEG:
1750 		dev_mask = GET_MASK(VCN, instance_mask);
1751 		break;
1752 	default:
1753 		dev_mask = instance_mask;
1754 		break;
1755 	}
1756 
1757 	/* reuse sub_block_index for backward compatibility */
1758 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1759 	dev_mask &= AMDGPU_RAS_INST_MASK;
1760 	info->sub_block_index |= dev_mask;
1761 
1762 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1763 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1764 
1765 	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1766 	ras_cmd->ras_in_message.trigger_error = *info;
1767 
1768 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1769 	if (ret)
1770 		return -EINVAL;
1771 
1772 	/* If err_event_athub occurs, error injection was successful;
1773 	 * however, the return status from the TA is no longer reliable
1774 	 */
1775 	if (amdgpu_ras_intr_triggered())
1776 		return 0;
1777 
1778 	if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1779 		return -EACCES;
1780 	else if (ras_cmd->ras_status)
1781 		return -EINVAL;
1782 
1783 	return 0;
1784 }
1785 
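/*
 * Translate an error address via the RAS TA: send a QUERY_ADDRESS
 * command and return the translated address in @addr_out.
 */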
1786 int psp_ras_query_address(struct psp_context *psp,
1787 			  struct ta_ras_query_address_input *addr_in,
1788 			  struct ta_ras_query_address_output *addr_out)
1789 {
1790 	struct ta_ras_shared_memory *ras_cmd;
1791 	int ret;
1792 
1793 	if (!psp->ras_context.context.initialized)
1794 		return -EINVAL;
1795 
1796 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1797 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1798 
1799 	ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
1800 	ras_cmd->ras_in_message.address = *addr_in;
1801 
1802 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1803 	if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1804 		return -EINVAL;
1805 
1806 	*addr_out = ras_cmd->ras_out_message.address;
1807 
1808 	return 0;
1809 }
1810 // ras end
1811 
1812 // HDCP start
1813 static int psp_hdcp_initialize(struct psp_context *psp)
1814 {
1815 	int ret;
1816 
1817 	/*
1818 	 * TODO: bypass the initialize in sriov for now
1819 	 */
1820 	if (amdgpu_sriov_vf(psp->adev))
1821 		return 0;
1822 
1823 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1824 	    !psp->hdcp_context.context.bin_desc.start_addr) {
1825 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1826 		return 0;
1827 	}
1828 
1829 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1830 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1831 
1832 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
1833 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1834 		if (ret)
1835 			return ret;
1836 	}
1837 
1838 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
1839 	if (!ret) {
1840 		psp->hdcp_context.context.initialized = true;
1841 		mutex_init(&psp->hdcp_context.mutex);
1842 	}
1843 
1844 	return ret;
1845 }
1846 
1847 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1848 {
1849 	/*
1850 	 * TODO: bypass the loading in sriov for now
1851 	 */
1852 	if (amdgpu_sriov_vf(psp->adev))
1853 		return 0;
1854 
1855 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1856 }
1857 
1858 static int psp_hdcp_terminate(struct psp_context *psp)
1859 {
1860 	int ret;
1861 
1862 	/*
1863 	 * TODO: bypass the terminate in sriov for now
1864 	 */
1865 	if (amdgpu_sriov_vf(psp->adev))
1866 		return 0;
1867 
1868 	if (!psp->hdcp_context.context.initialized)
1869 		return 0;
1870 
1871 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1872 
1873 	psp->hdcp_context.context.initialized = false;
1874 
1875 	return ret;
1876 }
1877 // HDCP end
1878 
1879 // DTM start
1880 static int psp_dtm_initialize(struct psp_context *psp)
1881 {
1882 	int ret;
1883 
1884 	/*
1885 	 * TODO: bypass the initialize in sriov for now
1886 	 */
1887 	if (amdgpu_sriov_vf(psp->adev))
1888 		return 0;
1889 
1890 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
1891 	    !psp->dtm_context.context.bin_desc.start_addr) {
1892 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1893 		return 0;
1894 	}
1895 
1896 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1897 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1898 
1899 	if (!psp->dtm_context.context.mem_context.shared_buf) {
1900 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1901 		if (ret)
1902 			return ret;
1903 	}
1904 
1905 	ret = psp_ta_load(psp, &psp->dtm_context.context);
1906 	if (!ret) {
1907 		psp->dtm_context.context.initialized = true;
1908 		mutex_init(&psp->dtm_context.mutex);
1909 	}
1910 
1911 	return ret;
1912 }
1913 
1914 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1915 {
1916 	/*
1917 	 * TODO: bypass the loading in sriov for now
1918 	 */
1919 	if (amdgpu_sriov_vf(psp->adev))
1920 		return 0;
1921 
1922 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
1923 }
1924 
1925 static int psp_dtm_terminate(struct psp_context *psp)
1926 {
1927 	int ret;
1928 
1929 	/*
1930 	 * TODO: bypass the terminate in sriov for now
1931 	 */
1932 	if (amdgpu_sriov_vf(psp->adev))
1933 		return 0;
1934 
1935 	if (!psp->dtm_context.context.initialized)
1936 		return 0;
1937 
1938 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
1939 
1940 	psp->dtm_context.context.initialized = false;
1941 
1942 	return ret;
1943 }
1944 // DTM end
1945 
1946 // RAP start
1947 static int psp_rap_initialize(struct psp_context *psp)
1948 {
1949 	int ret;
1950 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
1951 
1952 	/*
1953 	 * TODO: bypass the initialize in sriov for now
1954 	 */
1955 	if (amdgpu_sriov_vf(psp->adev))
1956 		return 0;
1957 
1958 	if (!psp->rap_context.context.bin_desc.size_bytes ||
1959 	    !psp->rap_context.context.bin_desc.start_addr) {
1960 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
1961 		return 0;
1962 	}
1963 
1964 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
1965 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1966 
1967 	if (!psp->rap_context.context.mem_context.shared_buf) {
1968 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
1969 		if (ret)
1970 			return ret;
1971 	}
1972 
1973 	ret = psp_ta_load(psp, &psp->rap_context.context);
1974 	if (!ret) {
1975 	if (ret)
1976 		return ret;
1977 
1978 	psp->rap_context.context.initialized = true;
1979 	mutex_init(&psp->rap_context.mutex);
1980 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
1981 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
1982 		psp_rap_terminate(psp);
1983 		/* free rap shared memory */
1984 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
1985 
1986 		dev_warn(psp->adev->dev, "RAP TA initialization failed (%d), status %d.\n",
1987 			 ret, status);
1988 
1989 		return ret;
1990 	}
1991 
1992 	return 0;
1993 }
1994 
1995 static int psp_rap_terminate(struct psp_context *psp)
1996 {
1997 	int ret;
1998 
1999 	if (!psp->rap_context.context.initialized)
2000 		return 0;
2001 
2002 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2003 
2004 	psp->rap_context.context.initialized = false;
2005 
2006 	return ret;
2007 }
2008 
2009 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2010 {
2011 	struct ta_rap_shared_memory *rap_cmd;
2012 	int ret = 0;
2013 
2014 	if (!psp->rap_context.context.initialized)
2015 		return 0;
2016 
2017 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2018 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2019 		return -EINVAL;
2020 
2021 	mutex_lock(&psp->rap_context.mutex);
2022 
2023 	rap_cmd = (struct ta_rap_shared_memory *)
2024 		  psp->rap_context.context.mem_context.shared_buf;
2025 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2026 
2027 	rap_cmd->cmd_id = ta_cmd_id;
2028 	rap_cmd->validation_method_id = METHOD_A;
2029 
2030 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2031 	if (ret)
2032 		goto out_unlock;
2033 
2034 	if (status)
2035 		*status = rap_cmd->rap_status;
2036 
2037 out_unlock:
2038 	mutex_unlock(&psp->rap_context.mutex);
2039 
2040 	return ret;
2041 }
2042 // RAP end
2043 
2044 /* securedisplay start */
2045 static int psp_securedisplay_initialize(struct psp_context *psp)
2046 {
2047 	int ret;
2048 	struct ta_securedisplay_cmd *securedisplay_cmd;
2049 
2050 	/*
2051 	 * TODO: bypass the initialize in sriov for now
2052 	 */
2053 	if (amdgpu_sriov_vf(psp->adev))
2054 		return 0;
2055 
2056 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2057 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2058 		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2059 		return 0;
2060 	}
2061 
2062 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2063 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2064 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2065 
2066 	if (!psp->securedisplay_context.context.initialized) {
2067 		ret = psp_ta_init_shared_buf(psp,
2068 					     &psp->securedisplay_context.context.mem_context);
2069 		if (ret)
2070 			return ret;
2071 	}
2072 
2073 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2074 	if (!ret) {
2075 	if (ret)
2076 		return ret;
2077 
2078 	psp->securedisplay_context.context.initialized = true;
2079 	mutex_init(&psp->securedisplay_context.mutex);
2080 	mutex_lock(&psp->securedisplay_context.mutex);
2081 
2082 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2083 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2084 
2085 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2086 
2087 	mutex_unlock(&psp->securedisplay_context.mutex);
2088 
2089 	if (ret) {
2090 		psp_securedisplay_terminate(psp);
2091 		/* free securedisplay shared memory */
2092 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2093 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialization failed.\n");
2094 		return -EINVAL;
2095 	}
2096 
2097 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2098 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2099 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2100 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2101 		/* don't try again */
2102 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2103 	}
2104 
2105 	return 0;
2106 }
2107 
2108 static int psp_securedisplay_terminate(struct psp_context *psp)
2109 {
2110 	int ret;
2111 
2112 	/*
2113 	 * TODO: bypass the terminate in sriov for now
2114 	 */
2115 	if (amdgpu_sriov_vf(psp->adev))
2116 		return 0;
2117 
2118 	if (!psp->securedisplay_context.context.initialized)
2119 		return 0;
2120 
2121 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2122 
2123 	psp->securedisplay_context.context.initialized = false;
2124 
2125 	return ret;
2126 }
2127 
2128 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2129 {
2130 	int ret;
2131 
2132 	if (!psp->securedisplay_context.context.initialized)
2133 		return -EINVAL;
2134 
2135 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2136 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2137 		return -EINVAL;
2138 
2139 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2140 
2141 	return ret;
2142 }
2143 /* SECUREDISPLAY end */
2144 
2145 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2146 {
2147 	struct psp_context *psp = &adev->psp;
2148 	int ret = 0;
2149 
2150 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2151 		ret = psp->funcs->wait_for_bootloader(psp);
2152 
2153 	return ret;
2154 }
2155 
2156 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2157 {
2158 	if (psp->funcs && psp->funcs->get_ras_capability)
2159 		return psp->funcs->get_ras_capability(psp);
2160 
2161 	return false;
2164 }
2165 
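/*
 * Bring up the PSP: on bare metal, load the available bootloader
 * components in order (KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV,
 * RAS_DRV, SOS), then create the KM ring, set up the TMR (unless it
 * was reserved at boot time) and load it.  When DF Cstate management
 * is centralized to PMFW, SMU firmware is loaded before the TMR load.
 */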
2166 static int psp_hw_start(struct psp_context *psp)
2167 {
2168 	struct amdgpu_device *adev = psp->adev;
2169 	int ret;
2170 
2171 	if (!amdgpu_sriov_vf(adev)) {
2172 		if ((is_psp_fw_valid(psp->kdb)) &&
2173 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2174 			ret = psp_bootloader_load_kdb(psp);
2175 			if (ret) {
2176 				dev_err(adev->dev, "PSP load kdb failed!\n");
2177 				return ret;
2178 			}
2179 		}
2180 
2181 		if ((is_psp_fw_valid(psp->spl)) &&
2182 		    (psp->funcs->bootloader_load_spl != NULL)) {
2183 			ret = psp_bootloader_load_spl(psp);
2184 			if (ret) {
2185 				dev_err(adev->dev, "PSP load spl failed!\n");
2186 				return ret;
2187 			}
2188 		}
2189 
2190 		if ((is_psp_fw_valid(psp->sys)) &&
2191 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2192 			ret = psp_bootloader_load_sysdrv(psp);
2193 			if (ret) {
2194 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2195 				return ret;
2196 			}
2197 		}
2198 
2199 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2200 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2201 			ret = psp_bootloader_load_soc_drv(psp);
2202 			if (ret) {
2203 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2204 				return ret;
2205 			}
2206 		}
2207 
2208 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2209 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2210 			ret = psp_bootloader_load_intf_drv(psp);
2211 			if (ret) {
2212 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2213 				return ret;
2214 			}
2215 		}
2216 
2217 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2218 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2219 			ret = psp_bootloader_load_dbg_drv(psp);
2220 			if (ret) {
2221 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2222 				return ret;
2223 			}
2224 		}
2225 
2226 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2227 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2228 			ret = psp_bootloader_load_ras_drv(psp);
2229 			if (ret) {
2230 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2231 				return ret;
2232 			}
2233 		}
2234 
2235 		if ((is_psp_fw_valid(psp->sos)) &&
2236 		    (psp->funcs->bootloader_load_sos != NULL)) {
2237 			ret = psp_bootloader_load_sos(psp);
2238 			if (ret) {
2239 				dev_err(adev->dev, "PSP load sos failed!\n");
2240 				return ret;
2241 			}
2242 		}
2243 	}
2244 
2245 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2246 	if (ret) {
2247 		dev_err(adev->dev, "PSP create ring failed!\n");
2248 		return ret;
2249 	}
2250 
2251 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2252 		goto skip_pin_bo;
2253 
2254 	if (!psp_boottime_tmr(psp)) {
2255 		ret = psp_tmr_init(psp);
2256 		if (ret) {
2257 			dev_err(adev->dev, "PSP tmr init failed!\n");
2258 			return ret;
2259 		}
2260 	}
2261 
2262 skip_pin_bo:
2263 	/*
2264 	 * For ASICs with DF Cstate management centralized
2265 	 * to PMFW, TMR setup should be performed after PMFW is
2266 	 * loaded and before other non-PSP firmware is loaded.
2267 	 */
2268 	if (psp->pmfw_centralized_cstate_management) {
2269 		ret = psp_load_smu_fw(psp);
2270 		if (ret)
2271 			return ret;
2272 	}
2273 
2274 	ret = psp_tmr_load(psp);
2275 	if (ret) {
2276 		dev_err(adev->dev, "PSP load tmr failed!\n");
2277 		return ret;
2278 	}
2279 
2280 	return 0;
2281 }
2282 
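/* Map an amdgpu ucode ID to the PSP GFX firmware type used by LOAD_IP_FW */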
2283 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2284 			   enum psp_gfx_fw_type *type)
2285 {
2286 	switch (ucode->ucode_id) {
2287 	case AMDGPU_UCODE_ID_CAP:
2288 		*type = GFX_FW_TYPE_CAP;
2289 		break;
2290 	case AMDGPU_UCODE_ID_SDMA0:
2291 		*type = GFX_FW_TYPE_SDMA0;
2292 		break;
2293 	case AMDGPU_UCODE_ID_SDMA1:
2294 		*type = GFX_FW_TYPE_SDMA1;
2295 		break;
2296 	case AMDGPU_UCODE_ID_SDMA2:
2297 		*type = GFX_FW_TYPE_SDMA2;
2298 		break;
2299 	case AMDGPU_UCODE_ID_SDMA3:
2300 		*type = GFX_FW_TYPE_SDMA3;
2301 		break;
2302 	case AMDGPU_UCODE_ID_SDMA4:
2303 		*type = GFX_FW_TYPE_SDMA4;
2304 		break;
2305 	case AMDGPU_UCODE_ID_SDMA5:
2306 		*type = GFX_FW_TYPE_SDMA5;
2307 		break;
2308 	case AMDGPU_UCODE_ID_SDMA6:
2309 		*type = GFX_FW_TYPE_SDMA6;
2310 		break;
2311 	case AMDGPU_UCODE_ID_SDMA7:
2312 		*type = GFX_FW_TYPE_SDMA7;
2313 		break;
2314 	case AMDGPU_UCODE_ID_CP_MES:
2315 		*type = GFX_FW_TYPE_CP_MES;
2316 		break;
2317 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2318 		*type = GFX_FW_TYPE_MES_STACK;
2319 		break;
2320 	case AMDGPU_UCODE_ID_CP_MES1:
2321 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2322 		break;
2323 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2324 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2325 		break;
2326 	case AMDGPU_UCODE_ID_CP_CE:
2327 		*type = GFX_FW_TYPE_CP_CE;
2328 		break;
2329 	case AMDGPU_UCODE_ID_CP_PFP:
2330 		*type = GFX_FW_TYPE_CP_PFP;
2331 		break;
2332 	case AMDGPU_UCODE_ID_CP_ME:
2333 		*type = GFX_FW_TYPE_CP_ME;
2334 		break;
2335 	case AMDGPU_UCODE_ID_CP_MEC1:
2336 		*type = GFX_FW_TYPE_CP_MEC;
2337 		break;
2338 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2339 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2340 		break;
2341 	case AMDGPU_UCODE_ID_CP_MEC2:
2342 		*type = GFX_FW_TYPE_CP_MEC;
2343 		break;
2344 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2345 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2346 		break;
2347 	case AMDGPU_UCODE_ID_RLC_P:
2348 		*type = GFX_FW_TYPE_RLC_P;
2349 		break;
2350 	case AMDGPU_UCODE_ID_RLC_V:
2351 		*type = GFX_FW_TYPE_RLC_V;
2352 		break;
2353 	case AMDGPU_UCODE_ID_RLC_G:
2354 		*type = GFX_FW_TYPE_RLC_G;
2355 		break;
2356 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2357 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2358 		break;
2359 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2360 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2361 		break;
2362 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2363 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2364 		break;
2365 	case AMDGPU_UCODE_ID_RLC_IRAM:
2366 		*type = GFX_FW_TYPE_RLC_IRAM;
2367 		break;
2368 	case AMDGPU_UCODE_ID_RLC_DRAM:
2369 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2370 		break;
2371 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2372 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2373 		break;
2374 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2375 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2376 		break;
2377 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2378 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2379 		break;
2380 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2381 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2382 		break;
2383 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2384 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2385 		break;
2386 	case AMDGPU_UCODE_ID_SMC:
2387 		*type = GFX_FW_TYPE_SMU;
2388 		break;
2389 	case AMDGPU_UCODE_ID_PPTABLE:
2390 		*type = GFX_FW_TYPE_PPTABLE;
2391 		break;
2392 	case AMDGPU_UCODE_ID_UVD:
2393 		*type = GFX_FW_TYPE_UVD;
2394 		break;
2395 	case AMDGPU_UCODE_ID_UVD1:
2396 		*type = GFX_FW_TYPE_UVD1;
2397 		break;
2398 	case AMDGPU_UCODE_ID_VCE:
2399 		*type = GFX_FW_TYPE_VCE;
2400 		break;
2401 	case AMDGPU_UCODE_ID_VCN:
2402 		*type = GFX_FW_TYPE_VCN;
2403 		break;
2404 	case AMDGPU_UCODE_ID_VCN1:
2405 		*type = GFX_FW_TYPE_VCN1;
2406 		break;
2407 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2408 		*type = GFX_FW_TYPE_DMCU_ERAM;
2409 		break;
2410 	case AMDGPU_UCODE_ID_DMCU_INTV:
2411 		*type = GFX_FW_TYPE_DMCU_ISR;
2412 		break;
2413 	case AMDGPU_UCODE_ID_VCN0_RAM:
2414 		*type = GFX_FW_TYPE_VCN0_RAM;
2415 		break;
2416 	case AMDGPU_UCODE_ID_VCN1_RAM:
2417 		*type = GFX_FW_TYPE_VCN1_RAM;
2418 		break;
2419 	case AMDGPU_UCODE_ID_DMCUB:
2420 		*type = GFX_FW_TYPE_DMUB;
2421 		break;
2422 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2423 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2424 		break;
2425 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2426 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2427 		break;
2428 	case AMDGPU_UCODE_ID_IMU_I:
2429 		*type = GFX_FW_TYPE_IMU_I;
2430 		break;
2431 	case AMDGPU_UCODE_ID_IMU_D:
2432 		*type = GFX_FW_TYPE_IMU_D;
2433 		break;
2434 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2435 		*type = GFX_FW_TYPE_RS64_PFP;
2436 		break;
2437 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2438 		*type = GFX_FW_TYPE_RS64_ME;
2439 		break;
2440 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2441 		*type = GFX_FW_TYPE_RS64_MEC;
2442 		break;
2443 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2444 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2445 		break;
2446 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2447 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2448 		break;
2449 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2450 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2451 		break;
2452 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2453 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2454 		break;
2455 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2456 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2457 		break;
2458 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2459 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2460 		break;
2461 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2462 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2463 		break;
2464 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2465 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2466 		break;
2467 	case AMDGPU_UCODE_ID_VPE_CTX:
2468 		*type = GFX_FW_TYPE_VPEC_FW1;
2469 		break;
2470 	case AMDGPU_UCODE_ID_VPE_CTL:
2471 		*type = GFX_FW_TYPE_VPEC_FW2;
2472 		break;
2473 	case AMDGPU_UCODE_ID_VPE:
2474 		*type = GFX_FW_TYPE_VPE;
2475 		break;
2476 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2477 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2478 		break;
2479 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2480 		*type = GFX_FW_TYPE_UMSCH_DATA;
2481 		break;
2482 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2483 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2484 		break;
2485 	case AMDGPU_UCODE_ID_P2S_TABLE:
2486 		*type = GFX_FW_TYPE_P2S_TABLE;
2487 		break;
2488 	case AMDGPU_UCODE_ID_JPEG_RAM:
2489 		*type = GFX_FW_TYPE_JPEG_RAM;
2490 		break;
2491 	case AMDGPU_UCODE_ID_MAXIMUM:
2492 	default:
2493 		return -EINVAL;
2494 	}
2495 
2496 	return 0;
2497 }
2498 
2499 static void psp_print_fw_hdr(struct psp_context *psp,
2500 			     struct amdgpu_firmware_info *ucode)
2501 {
2502 	struct amdgpu_device *adev = psp->adev;
2503 	struct common_firmware_header *hdr;
2504 
2505 	switch (ucode->ucode_id) {
2506 	case AMDGPU_UCODE_ID_SDMA0:
2507 	case AMDGPU_UCODE_ID_SDMA1:
2508 	case AMDGPU_UCODE_ID_SDMA2:
2509 	case AMDGPU_UCODE_ID_SDMA3:
2510 	case AMDGPU_UCODE_ID_SDMA4:
2511 	case AMDGPU_UCODE_ID_SDMA5:
2512 	case AMDGPU_UCODE_ID_SDMA6:
2513 	case AMDGPU_UCODE_ID_SDMA7:
2514 		hdr = (struct common_firmware_header *)
2515 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2516 		amdgpu_ucode_print_sdma_hdr(hdr);
2517 		break;
2518 	case AMDGPU_UCODE_ID_CP_CE:
2519 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2520 		amdgpu_ucode_print_gfx_hdr(hdr);
2521 		break;
2522 	case AMDGPU_UCODE_ID_CP_PFP:
2523 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2524 		amdgpu_ucode_print_gfx_hdr(hdr);
2525 		break;
2526 	case AMDGPU_UCODE_ID_CP_ME:
2527 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2528 		amdgpu_ucode_print_gfx_hdr(hdr);
2529 		break;
2530 	case AMDGPU_UCODE_ID_CP_MEC1:
2531 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2532 		amdgpu_ucode_print_gfx_hdr(hdr);
2533 		break;
2534 	case AMDGPU_UCODE_ID_RLC_G:
2535 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2536 		amdgpu_ucode_print_rlc_hdr(hdr);
2537 		break;
2538 	case AMDGPU_UCODE_ID_SMC:
2539 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2540 		amdgpu_ucode_print_smc_hdr(hdr);
2541 		break;
2542 	default:
2543 		break;
2544 	}
2545 }
2546 
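/*
 * Fill a GFX_CMD_ID_LOAD_IP_FW command with the firmware's GPU address,
 * size and PSP firmware type.
 */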
2547 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2548 				       struct amdgpu_firmware_info *ucode,
2549 				       struct psp_gfx_cmd_resp *cmd)
2550 {
2551 	int ret;
2552 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2553 
2554 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2555 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2556 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2557 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2558 
2559 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2560 	if (ret)
2561 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2562 
2563 	return ret;
2564 }
2565 
2566 int psp_execute_ip_fw_load(struct psp_context *psp,
2567 			   struct amdgpu_firmware_info *ucode)
2568 {
2569 	int ret = 0;
2570 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2571 
2572 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2573 	if (!ret) {
2574 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2575 					 psp->fence_buf_mc_addr);
2576 	}
2577 
2578 	release_psp_cmd_buf(psp);
2579 
2580 	return ret;
2581 }
2582 
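/*
 * Load the P2S table through PSP if present.  Skipped under SRIOV,
 * when runtime PM uses BACO, and on MP0 v13.0.6 parts whose SOS is
 * too old to accept it.
 */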
2583 static int psp_load_p2s_table(struct psp_context *psp)
2584 {
2585 	int ret;
2586 	struct amdgpu_device *adev = psp->adev;
2587 	struct amdgpu_firmware_info *ucode =
2588 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2589 
2590 	if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2591 		return 0;
2592 
2593 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
2594 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2595 								0x0036003C;
2596 		if (psp->sos.fw_version < supp_vers)
2597 			return 0;
2598 	}
2599 
2600 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2601 		return 0;
2602 
2603 	ret = psp_execute_ip_fw_load(psp, ucode);
2604 
2605 	return ret;
2606 }
2607 
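/*
 * Load SMU firmware through PSP.  Skipped when runtime PM uses BACO
 * (the SMU stays alive) and under SRIOV.  During a RAS-enabled GPU
 * reset on MP0 v11.0.2/v11.0.4, MP1 is first put into the UNLOAD
 * state before reloading.
 */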
2608 static int psp_load_smu_fw(struct psp_context *psp)
2609 {
2610 	int ret;
2611 	struct amdgpu_device *adev = psp->adev;
2612 	struct amdgpu_firmware_info *ucode =
2613 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2614 	struct amdgpu_ras *ras = psp->ras_context.ras;
2615 
2616 	/*
2617 	 * Skip SMU FW reloading when runtime PM uses BACO, as the SMU
2618 	 * stays alive in that case.
2619 	 */
2620 	if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2621 		return 0;
2622 
2623 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2624 		return 0;
2625 
2626 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2627 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2628 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2629 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2630 		if (ret)
2631 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2632 	}
2633 
2634 	ret = psp_execute_ip_fw_load(psp, ucode);
2635 
2636 	if (ret)
2637 		dev_err(adev->dev, "PSP load smu failed!\n");
2638 
2639 	return ret;
2640 }
2641 
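/*
 * Return true when a firmware image should not be submitted to PSP in
 * the generic load loop: missing images, the P2S table, SMC images
 * covered by quirks/autoload/centralized cstate management, images
 * skipped under SRIOV, and MEC JT images when RLC autoload is enabled.
 */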
2642 static bool fw_load_skip_check(struct psp_context *psp,
2643 			       struct amdgpu_firmware_info *ucode)
2644 {
2645 	if (!ucode->fw || !ucode->ucode_size)
2646 		return true;
2647 
2648 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2649 		return true;
2650 
2651 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2652 	    (psp_smu_reload_quirk(psp) ||
2653 	     psp->autoload_supported ||
2654 	     psp->pmfw_centralized_cstate_management))
2655 		return true;
2656 
2657 	if (amdgpu_sriov_vf(psp->adev) &&
2658 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2659 		return true;
2660 
2661 	if (psp->autoload_supported &&
2662 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2663 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2664 		/* skip mec JT when autoload is enabled */
2665 		return true;
2666 
2667 	return false;
2668 }
2669 
2670 int psp_load_fw_list(struct psp_context *psp,
2671 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
2672 {
2673 	int ret = 0, i;
2674 	struct amdgpu_firmware_info *ucode;
2675 
2676 	for (i = 0; i < ucode_count; ++i) {
2677 		ucode = ucode_list[i];
2678 		psp_print_fw_hdr(psp, ucode);
2679 		ret = psp_execute_ip_fw_load(psp, ucode);
2680 		if (ret)
2681 			return ret;
2682 	}
2683 	return ret;
2684 }
2685 
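/*
 * Load every non-PSP firmware image through PSP: SMU first when RLC
 * autoload is supported (and cstate management is not centralized),
 * then the P2S table, then the remaining images, starting RLC autoload
 * once the trigger ucode has been submitted.
 */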
2686 static int psp_load_non_psp_fw(struct psp_context *psp)
2687 {
2688 	int i, ret;
2689 	struct amdgpu_firmware_info *ucode;
2690 	struct amdgpu_device *adev = psp->adev;
2691 
2692 	if (psp->autoload_supported &&
2693 	    !psp->pmfw_centralized_cstate_management) {
2694 		ret = psp_load_smu_fw(psp);
2695 		if (ret)
2696 			return ret;
2697 	}
2698 
2699 	/* Load P2S table first if it's available */
2700 	psp_load_p2s_table(psp);
2701 
2702 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
2703 		ucode = &adev->firmware.ucode[i];
2704 
2705 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2706 		    !fw_load_skip_check(psp, ucode)) {
2707 			ret = psp_load_smu_fw(psp);
2708 			if (ret)
2709 				return ret;
2710 			continue;
2711 		}
2712 
2713 		if (fw_load_skip_check(psp, ucode))
2714 			continue;
2715 
2716 		if (psp->autoload_supported &&
2717 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2718 			     IP_VERSION(11, 0, 7) ||
2719 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2720 			     IP_VERSION(11, 0, 11) ||
2721 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2722 			     IP_VERSION(11, 0, 12)) &&
2723 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2724 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2725 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2726 			/* PSP only receives one SDMA fw for sienna_cichlid,
2727 			 * as all four SDMA fw images are the same
2728 			 */
2729 			continue;
2730 
2731 		psp_print_fw_hdr(psp, ucode);
2732 
2733 		ret = psp_execute_ip_fw_load(psp, ucode);
2734 		if (ret)
2735 			return ret;
2736 
2737 		/* Start RLC autoload after PSP has received all the GFX firmware */
2738 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2739 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2740 			ret = psp_rlc_autoload_start(psp);
2741 			if (ret) {
2742 				dev_err(adev->dev, "Failed to start rlc autoload\n");
2743 				return ret;
2744 			}
2745 		}
2746 	}
2747 
2748 	return 0;
2749 }
2750 
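/*
 * Full PSP firmware load path used at hw_init: initialize the KM ring
 * (or only stop it during an SRIOV reset), start the PSP, then load
 * non-PSP firmware, ASD and RL, and bring up the optional TAs when a
 * TA firmware is present.
 */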
2751 static int psp_load_fw(struct amdgpu_device *adev)
2752 {
2753 	int ret;
2754 	struct psp_context *psp = &adev->psp;
2755 
2756 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2757 		/* should not destroy ring, only stop */
2758 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
2759 	} else {
2760 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2761 
2762 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2763 		if (ret) {
2764 			dev_err(adev->dev, "PSP ring init failed!\n");
2765 			goto failed;
2766 		}
2767 	}
2768 
2769 	ret = psp_hw_start(psp);
2770 	if (ret)
2771 		goto failed;
2772 
2773 	ret = psp_load_non_psp_fw(psp);
2774 	if (ret)
2775 		goto failed1;
2776 
2777 	ret = psp_asd_initialize(psp);
2778 	if (ret) {
2779 		dev_err(adev->dev, "PSP load asd failed!\n");
2780 		goto failed1;
2781 	}
2782 
2783 	ret = psp_rl_load(adev);
2784 	if (ret) {
2785 		dev_err(adev->dev, "PSP load RL failed!\n");
2786 		goto failed1;
2787 	}
2788 
2789 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2790 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
2791 			ret = psp_xgmi_initialize(psp, false, true);
2792 			/* Warn on XGMI session initialization failure
2793 			 * instead of stopping driver initialization
2794 			 */
2795 			if (ret)
2796 				dev_err(psp->adev->dev,
2797 					"XGMI: Failed to initialize XGMI session\n");
2798 		}
2799 	}
2800 
2801 	if (psp->ta_fw) {
2802 		ret = psp_ras_initialize(psp);
2803 		if (ret)
2804 			dev_err(psp->adev->dev,
2805 				"RAS: Failed to initialize RAS\n");
2806 
2807 		ret = psp_hdcp_initialize(psp);
2808 		if (ret)
2809 			dev_err(psp->adev->dev,
2810 				"HDCP: Failed to initialize HDCP\n");
2811 
2812 		ret = psp_dtm_initialize(psp);
2813 		if (ret)
2814 			dev_err(psp->adev->dev,
2815 				"DTM: Failed to initialize DTM\n");
2816 
2817 		ret = psp_rap_initialize(psp);
2818 		if (ret)
2819 			dev_err(psp->adev->dev,
2820 				"RAP: Failed to initialize RAP\n");
2821 
2822 		ret = psp_securedisplay_initialize(psp);
2823 		if (ret)
2824 			dev_err(psp->adev->dev,
2825 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2826 	}
2827 
2828 	return 0;
2829 
2830 failed1:
2831 	psp_free_shared_bufs(psp);
2832 failed:
2833 	/*
2834 	 * all cleanup jobs (xgmi terminate, ras terminate,
2835 	 * ring destroy, cmd/fence/fw buffers destroy,
2836 	 * psp->cmd destroy) are delayed to psp_hw_fini
2837 	 */
2838 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2839 	return ret;
2840 }
2841 
2842 static int psp_hw_init(void *handle)
2843 {
2844 	int ret;
2845 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2846 
2847 	mutex_lock(&adev->firmware.mutex);
2848 	/*
2849 	 * This sequence is only used once, during hw_init; it is not
2850 	 * needed on resume.
2851 	 */
2852 	ret = amdgpu_ucode_init_bo(adev);
2853 	if (ret)
2854 		goto failed;
2855 
2856 	ret = psp_load_fw(adev);
2857 	if (ret) {
2858 		dev_err(adev->dev, "PSP firmware loading failed\n");
2859 		goto failed;
2860 	}
2861 
2862 	mutex_unlock(&adev->firmware.mutex);
2863 	return 0;
2864 
2865 failed:
2866 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2867 	mutex_unlock(&adev->firmware.mutex);
2868 	return -EINVAL;
2869 }
2870 
2871 static int psp_hw_fini(void *handle)
2872 {
2873 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2874 	struct psp_context *psp = &adev->psp;
2875 
2876 	if (psp->ta_fw) {
2877 		psp_ras_terminate(psp);
2878 		psp_securedisplay_terminate(psp);
2879 		psp_rap_terminate(psp);
2880 		psp_dtm_terminate(psp);
2881 		psp_hdcp_terminate(psp);
2882 
2883 		if (adev->gmc.xgmi.num_physical_nodes > 1)
2884 			psp_xgmi_terminate(psp);
2885 	}
2886 
2887 	psp_asd_terminate(psp);
2888 	psp_tmr_terminate(psp);
2889 
2890 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2891 
2892 	return 0;
2893 }
2894 
2895 static int psp_suspend(void *handle)
2896 {
2897 	int ret = 0;
2898 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2899 	struct psp_context *psp = &adev->psp;
2900 
2901 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
2902 	    psp->xgmi_context.context.initialized) {
2903 		ret = psp_xgmi_terminate(psp);
2904 		if (ret) {
2905 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
2906 			goto out;
2907 		}
2908 	}
2909 
2910 	if (psp->ta_fw) {
2911 		ret = psp_ras_terminate(psp);
2912 		if (ret) {
2913 			dev_err(adev->dev, "Failed to terminate ras ta\n");
2914 			goto out;
2915 		}
2916 		ret = psp_hdcp_terminate(psp);
2917 		if (ret) {
2918 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
2919 			goto out;
2920 		}
2921 		ret = psp_dtm_terminate(psp);
2922 		if (ret) {
2923 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
2924 			goto out;
2925 		}
2926 		ret = psp_rap_terminate(psp);
2927 		if (ret) {
2928 			dev_err(adev->dev, "Failed to terminate rap ta\n");
2929 			goto out;
2930 		}
2931 		ret = psp_securedisplay_terminate(psp);
2932 		if (ret) {
2933 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
2934 			goto out;
2935 		}
2936 	}
2937 
2938 	ret = psp_asd_terminate(psp);
2939 	if (ret) {
2940 		dev_err(adev->dev, "Failed to terminate asd\n");
2941 		goto out;
2942 	}
2943 
2944 	ret = psp_tmr_terminate(psp);
2945 	if (ret) {
2946 		dev_err(adev->dev, "Failed to terminate tmr\n");
2947 		goto out;
2948 	}
2949 
2950 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2951 	if (ret)
2952 		dev_err(adev->dev, "PSP ring stop failed\n");
2953 
2954 out:
2955 	return ret;
2956 }
2957 
2958 static int psp_resume(void *handle)
2959 {
2960 	int ret;
2961 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2962 	struct psp_context *psp = &adev->psp;
2963 
2964 	dev_info(adev->dev, "PSP is resuming...\n");
2965 
2966 	if (psp->mem_train_ctx.enable_mem_training) {
2967 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2968 		if (ret) {
2969 			dev_err(adev->dev, "Failed to process memory training!\n");
2970 			return ret;
2971 		}
2972 	}
2973 
2974 	mutex_lock(&adev->firmware.mutex);
2975 
2976 	ret = psp_hw_start(psp);
2977 	if (ret)
2978 		goto failed;
2979 
2980 	ret = psp_load_non_psp_fw(psp);
2981 	if (ret)
2982 		goto failed;
2983 
2984 	ret = psp_asd_initialize(psp);
2985 	if (ret) {
2986 		dev_err(adev->dev, "PSP load asd failed!\n");
2987 		goto failed;
2988 	}
2989 
2990 	ret = psp_rl_load(adev);
2991 	if (ret) {
2992 		dev_err(adev->dev, "PSP load RL failed!\n");
2993 		goto failed;
2994 	}
2995 
2996 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2997 		ret = psp_xgmi_initialize(psp, false, true);
2998 		/* Warn on XGMI session initialization failure
2999 		 * instead of stopping driver initialization
3000 		 */
3001 		if (ret)
3002 			dev_err(psp->adev->dev,
3003 				"XGMI: Failed to initialize XGMI session\n");
3004 	}
3005 
3006 	if (psp->ta_fw) {
3007 		ret = psp_ras_initialize(psp);
3008 		if (ret)
3009 			dev_err(psp->adev->dev,
3010 				"RAS: Failed to initialize RAS\n");
3011 
3012 		ret = psp_hdcp_initialize(psp);
3013 		if (ret)
3014 			dev_err(psp->adev->dev,
3015 				"HDCP: Failed to initialize HDCP\n");
3016 
3017 		ret = psp_dtm_initialize(psp);
3018 		if (ret)
3019 			dev_err(psp->adev->dev,
3020 				"DTM: Failed to initialize DTM\n");
3021 
3022 		ret = psp_rap_initialize(psp);
3023 		if (ret)
3024 			dev_err(psp->adev->dev,
3025 				"RAP: Failed to initialize RAP\n");
3026 
3027 		ret = psp_securedisplay_initialize(psp);
3028 		if (ret)
3029 			dev_err(psp->adev->dev,
3030 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3031 	}
3032 
3033 	mutex_unlock(&adev->firmware.mutex);
3034 
3035 	return 0;
3036 
3037 failed:
3038 	dev_err(adev->dev, "PSP resume failed\n");
3039 	mutex_unlock(&adev->firmware.mutex);
3040 	return ret;
3041 }
3042 
3043 int psp_gpu_reset(struct amdgpu_device *adev)
3044 {
3045 	int ret;
3046 
3047 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3048 		return 0;
3049 
3050 	mutex_lock(&adev->psp.mutex);
3051 	ret = psp_mode1_reset(&adev->psp);
3052 	mutex_unlock(&adev->psp.mutex);
3053 
3054 	return ret;
3055 }
3056 
3057 int psp_rlc_autoload_start(struct psp_context *psp)
3058 {
3059 	int ret;
3060 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3061 
3062 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3063 
3064 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3065 				 psp->fence_buf_mc_addr);
3066 
3067 	release_psp_cmd_buf(psp);
3068 
3069 	return ret;
3070 }
3071 
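/*
 * Submit one command to the KM ring: pick the next ring-buffer frame
 * from the current write pointer, fill in the command and fence
 * addresses, flush HDP and advance the write pointer.
 */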
3072 int psp_ring_cmd_submit(struct psp_context *psp,
3073 			uint64_t cmd_buf_mc_addr,
3074 			uint64_t fence_mc_addr,
3075 			int index)
3076 {
3077 	unsigned int psp_write_ptr_reg = 0;
3078 	struct psp_gfx_rb_frame *write_frame;
3079 	struct psp_ring *ring = &psp->km_ring;
3080 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3081 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3082 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3083 	struct amdgpu_device *adev = psp->adev;
3084 	uint32_t ring_size_dw = ring->ring_size / 4;
3085 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3086 
3087 	/* KM (GPCOM) prepare write pointer */
3088 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3089 
3090 	/* Update KM RB frame pointer to new frame */
3091 	/* write_frame ptr increments by size of rb_frame in bytes */
3092 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3093 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3094 		write_frame = ring_buffer_start;
3095 	else
3096 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3097 	/* Check invalid write_frame ptr address */
3098 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3099 		dev_err(adev->dev,
3100 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3101 			ring_buffer_start, ring_buffer_end, write_frame);
3102 		dev_err(adev->dev,
3103 			"write_frame is pointing to address out of bounds\n");
3104 		return -EINVAL;
3105 	}
3106 
3107 	/* Initialize KM RB frame */
3108 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3109 
3110 	/* Update KM RB frame */
3111 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3112 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3113 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3114 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3115 	write_frame->fence_value = index;
3116 	amdgpu_device_flush_hdp(adev, NULL);
3117 
3118 	/* Update the write Pointer in DWORDs */
3119 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3120 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3121 	return 0;
3122 }
3123 
3124 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3125 {
3126 	struct amdgpu_device *adev = psp->adev;
3127 	char fw_name[PSP_FW_NAME_LEN];
3128 	const struct psp_firmware_header_v1_0 *asd_hdr;
3129 	int err = 0;
3130 
3131 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
3132 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
3133 	if (err)
3134 		goto out;
3135 
3136 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3137 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3138 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3139 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3140 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3141 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3142 	return 0;
3143 out:
3144 	amdgpu_ucode_release(&adev->psp.asd_fw);
3145 	return err;
3146 }
3147 
3148 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3149 {
3150 	struct amdgpu_device *adev = psp->adev;
3151 	char fw_name[PSP_FW_NAME_LEN];
3152 	const struct psp_firmware_header_v1_0 *toc_hdr;
3153 	int err = 0;
3154 
3155 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
3156 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
3157 	if (err)
3158 		goto out;
3159 
3160 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3161 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3162 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3163 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3164 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3165 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3166 	return 0;
3167 out:
3168 	amdgpu_ucode_release(&adev->psp.toc_fw);
3169 	return err;
3170 }
3171 
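/*
 * Decode one firmware descriptor from a v2 SOS header and record the
 * component's version, size and start address in the psp context.
 */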
3172 static int parse_sos_bin_descriptor(struct psp_context *psp,
3173 				   const struct psp_fw_bin_desc *desc,
3174 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3175 {
3176 	uint8_t *ucode_start_addr  = NULL;
3177 
3178 	if (!psp || !desc || !sos_hdr)
3179 		return -EINVAL;
3180 
3181 	ucode_start_addr  = (uint8_t *)sos_hdr +
3182 			    le32_to_cpu(desc->offset_bytes) +
3183 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3184 
3185 	switch (desc->fw_type) {
3186 	case PSP_FW_TYPE_PSP_SOS:
3187 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3188 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3189 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3190 		psp->sos.start_addr	   = ucode_start_addr;
3191 		break;
3192 	case PSP_FW_TYPE_PSP_SYS_DRV:
3193 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3194 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3195 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3196 		psp->sys.start_addr        = ucode_start_addr;
3197 		break;
3198 	case PSP_FW_TYPE_PSP_KDB:
3199 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3200 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3201 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3202 		psp->kdb.start_addr        = ucode_start_addr;
3203 		break;
3204 	case PSP_FW_TYPE_PSP_TOC:
3205 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3206 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3207 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3208 		psp->toc.start_addr        = ucode_start_addr;
3209 		break;
3210 	case PSP_FW_TYPE_PSP_SPL:
3211 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3212 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3213 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3214 		psp->spl.start_addr        = ucode_start_addr;
3215 		break;
3216 	case PSP_FW_TYPE_PSP_RL:
3217 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3218 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3219 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3220 		psp->rl.start_addr         = ucode_start_addr;
3221 		break;
3222 	case PSP_FW_TYPE_PSP_SOC_DRV:
3223 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3224 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3225 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3226 		psp->soc_drv.start_addr         = ucode_start_addr;
3227 		break;
3228 	case PSP_FW_TYPE_PSP_INTF_DRV:
3229 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3230 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3231 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3232 		psp->intf_drv.start_addr        = ucode_start_addr;
3233 		break;
3234 	case PSP_FW_TYPE_PSP_DBG_DRV:
3235 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3236 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3237 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3238 		psp->dbg_drv.start_addr         = ucode_start_addr;
3239 		break;
3240 	case PSP_FW_TYPE_PSP_RAS_DRV:
3241 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3242 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3243 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3244 		psp->ras_drv.start_addr         = ucode_start_addr;
3245 		break;
3246 	default:
3247 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3248 		break;
3249 	}
3250 
3251 	return 0;
3252 }
3253 
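/*
 * Parse the base SOS/SYS_DRV images from a v1 SOS header; MP0 v13.0.2
 * parts not connected to the CPU use the alternate aux images from the
 * v1.3 header instead.
 */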
3254 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3255 {
3256 	const struct psp_firmware_header_v1_0 *sos_hdr;
3257 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3258 	uint8_t *ucode_array_start_addr;
3259 
3260 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3261 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3262 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3263 
3264 	if (adev->gmc.xgmi.connected_to_cpu ||
3265 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3266 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3267 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3268 
3269 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3270 		adev->psp.sys.start_addr = ucode_array_start_addr;
3271 
3272 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3273 		adev->psp.sos.start_addr = ucode_array_start_addr +
3274 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3275 	} else {
3276 		/* Load alternate PSP SOS FW */
3277 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3278 
3279 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3280 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3281 
3282 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3283 		adev->psp.sys.start_addr = ucode_array_start_addr +
3284 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3285 
3286 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3287 		adev->psp.sos.start_addr = ucode_array_start_addr +
3288 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3289 	}
3290 
3291 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3292 		dev_warn(adev->dev, "PSP SOS FW not available\n");
3293 		return -EINVAL;
3294 	}
3295 
3296 	return 0;
3297 }
3298 
3299 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3300 {
3301 	struct amdgpu_device *adev = psp->adev;
3302 	char fw_name[PSP_FW_NAME_LEN];
3303 	const struct psp_firmware_header_v1_0 *sos_hdr;
3304 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3305 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3306 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3307 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3308 	int err = 0;
3309 	uint8_t *ucode_array_start_addr;
3310 	int fw_index = 0;
3311 
3312 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
3313 	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
3314 	if (err)
3315 		goto out;
3316 
3317 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3318 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3319 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3320 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3321 
3322 	switch (sos_hdr->header.header_version_major) {
3323 	case 1:
3324 		err = psp_init_sos_base_fw(adev);
3325 		if (err)
3326 			goto out;
3327 
3328 		if (sos_hdr->header.header_version_minor == 1) {
3329 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3330 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3331 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3332 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3333 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3334 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3335 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3336 		}
3337 		if (sos_hdr->header.header_version_minor == 2) {
3338 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3339 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3340 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3341 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3342 		}
3343 		if (sos_hdr->header.header_version_minor == 3) {
3344 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3345 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3346 			adev->psp.toc.start_addr = ucode_array_start_addr +
3347 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3348 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3349 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3350 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3351 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3352 			adev->psp.spl.start_addr = ucode_array_start_addr +
3353 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3354 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3355 			adev->psp.rl.start_addr = ucode_array_start_addr +
3356 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3357 		}
3358 		break;
3359 	case 2:
3360 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3361 
3362 		if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3363 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3364 			err = -EINVAL;
3365 			goto out;
3366 		}
3367 
3368 		for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3369 			err = parse_sos_bin_descriptor(psp,
3370 						       &sos_hdr_v2_0->psp_fw_bin[fw_index],
3371 						       sos_hdr_v2_0);
3372 			if (err)
3373 				goto out;
3374 		}
3375 		break;
3376 	default:
3377 		dev_err(adev->dev,
3378 			"unsupported psp sos firmware\n");
3379 		err = -EINVAL;
3380 		goto out;
3381 	}
3382 
3383 	return 0;
3384 out:
3385 	amdgpu_ucode_release(&adev->psp.sos_fw);
3386 
3387 	return err;
3388 }
3389 
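/*
 * Decode one TA descriptor from a v2 TA header and record the TA's
 * version, size and start address in the matching TA context.
 */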
3390 static int parse_ta_bin_descriptor(struct psp_context *psp,
3391 				   const struct psp_fw_bin_desc *desc,
3392 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3393 {
3394 	uint8_t *ucode_start_addr  = NULL;
3395 
3396 	if (!psp || !desc || !ta_hdr)
3397 		return -EINVAL;
3398 
3399 	ucode_start_addr  = (uint8_t *)ta_hdr +
3400 			    le32_to_cpu(desc->offset_bytes) +
3401 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3402 
3403 	switch (desc->fw_type) {
3404 	case TA_FW_TYPE_PSP_ASD:
3405 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3406 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3407 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3408 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3409 		break;
3410 	case TA_FW_TYPE_PSP_XGMI:
3411 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3412 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3413 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3414 		break;
3415 	case TA_FW_TYPE_PSP_RAS:
3416 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3417 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3418 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3419 		break;
3420 	case TA_FW_TYPE_PSP_HDCP:
3421 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3422 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3423 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3424 		break;
3425 	case TA_FW_TYPE_PSP_DTM:
3426 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3427 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3428 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3429 		break;
3430 	case TA_FW_TYPE_PSP_RAP:
3431 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3432 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3433 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3434 		break;
3435 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3436 		psp->securedisplay_context.context.bin_desc.fw_version =
3437 			le32_to_cpu(desc->fw_version);
3438 		psp->securedisplay_context.context.bin_desc.size_bytes =
3439 			le32_to_cpu(desc->size_bytes);
3440 		psp->securedisplay_context.context.bin_desc.start_addr =
3441 			ucode_start_addr;
3442 		break;
3443 	default:
3444 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3445 		break;
3446 	}
3447 
3448 	return 0;
3449 }
3450 
3451 static int parse_ta_v1_microcode(struct psp_context *psp)
3452 {
3453 	const struct ta_firmware_header_v1_0 *ta_hdr;
3454 	struct amdgpu_device *adev = psp->adev;
3455 
3456 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3457 
3458 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3459 		return -EINVAL;
3460 
3461 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3462 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3463 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3464 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3465 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3466 		(uint8_t *)ta_hdr +
3467 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3468 
3469 	adev->psp.ras_context.context.bin_desc.fw_version =
3470 		le32_to_cpu(ta_hdr->ras.fw_version);
3471 	adev->psp.ras_context.context.bin_desc.size_bytes =
3472 		le32_to_cpu(ta_hdr->ras.size_bytes);
3473 	adev->psp.ras_context.context.bin_desc.start_addr =
3474 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3475 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3476 
3477 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3478 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3479 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3480 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3481 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3482 		(uint8_t *)ta_hdr +
3483 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3484 
3485 	adev->psp.dtm_context.context.bin_desc.fw_version =
3486 		le32_to_cpu(ta_hdr->dtm.fw_version);
3487 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3488 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3489 	adev->psp.dtm_context.context.bin_desc.start_addr =
3490 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3491 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3492 
3493 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3494 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3495 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3496 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3497 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3498 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3499 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3500 
3501 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3502 
3503 	return 0;
3504 }
3505 
3506 static int parse_ta_v2_microcode(struct psp_context *psp)
3507 {
3508 	const struct ta_firmware_header_v2_0 *ta_hdr;
3509 	struct amdgpu_device *adev = psp->adev;
3510 	int err = 0;
3511 	int ta_index = 0;
3512 
3513 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3514 
3515 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3516 		return -EINVAL;
3517 
3518 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3519 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3520 		return -EINVAL;
3521 	}
3522 
3523 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3524 		err = parse_ta_bin_descriptor(psp,
3525 					      &ta_hdr->ta_fw_bin[ta_index],
3526 					      ta_hdr);
3527 		if (err)
3528 			return err;
3529 	}
3530 
3531 	return 0;
3532 }
3533 
3534 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3535 {
3536 	const struct common_firmware_header *hdr;
3537 	struct amdgpu_device *adev = psp->adev;
3538 	char fw_name[PSP_FW_NAME_LEN];
3539 	int err;
3540 
3541 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
3542 	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
3543 	if (err)
3544 		return err;
3545 
3546 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3547 	switch (le16_to_cpu(hdr->header_version_major)) {
3548 	case 1:
3549 		err = parse_ta_v1_microcode(psp);
3550 		break;
3551 	case 2:
3552 		err = parse_ta_v2_microcode(psp);
3553 		break;
3554 	default:
3555 		dev_err(adev->dev, "unsupported TA header version\n");
3556 		err = -EINVAL;
3557 	}
3558 
3559 	if (err)
3560 		amdgpu_ucode_release(&adev->psp.ta_fw);
3561 
3562 	return err;
3563 }
3564 
3565 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3566 {
3567 	struct amdgpu_device *adev = psp->adev;
3568 	char fw_name[PSP_FW_NAME_LEN];
3569 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3570 	struct amdgpu_firmware_info *info = NULL;
3571 	int err = 0;
3572 
3573 	if (!amdgpu_sriov_vf(adev)) {
3574 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3575 		return -EINVAL;
3576 	}
3577 
3578 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
3579 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
3580 	if (err) {
3581 		if (err == -ENODEV) {
3582 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3583 			err = 0;
3584 			goto out;
3585 		}
3586 		dev_err(adev->dev, "fail to initialize cap microcode\n");
		goto out;
3587 	}
3588 
3589 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3590 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
3591 	info->fw = adev->psp.cap_fw;
3592 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3593 		adev->psp.cap_fw->data;
3594 	adev->firmware.fw_size += ALIGN(
3595 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3596 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3597 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3598 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3599 
3600 	return 0;
3601 
3602 out:
3603 	amdgpu_ucode_release(&adev->psp.cap_fw);
3604 	return err;
3605 }
3606 
3607 static int psp_set_clockgating_state(void *handle,
3608 				     enum amd_clockgating_state state)
3609 {
3610 	return 0;
3611 }
3612 
3613 static int psp_set_powergating_state(void *handle,
3614 				     enum amd_powergating_state state)
3615 {
3616 	return 0;
3617 }
3618 
3619 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3620 					 struct device_attribute *attr,
3621 					 char *buf)
3622 {
3623 	struct drm_device *ddev = dev_get_drvdata(dev);
3624 	struct amdgpu_device *adev = drm_to_adev(ddev);
3625 	uint32_t fw_ver;
3626 	int ret;
3627 
3628 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3629 		dev_info(adev->dev, "PSP block is not ready yet.\n");
3630 		return -EBUSY;
3631 	}
3632 
3633 	mutex_lock(&adev->psp.mutex);
3634 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3635 	mutex_unlock(&adev->psp.mutex);
3636 
3637 	if (ret) {
3638 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3639 		return ret;
3640 	}
3641 
3642 	return sysfs_emit(buf, "%x\n", fw_ver);
3643 }
3644 
3645 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3646 					   struct device_attribute *attr,
3647 					   const char *buf,
3648 					   size_t count)
3649 {
3650 	struct drm_device *ddev = dev_get_drvdata(dev);
3651 	struct amdgpu_device *adev = drm_to_adev(ddev);
3652 	int ret, idx;
3653 	char fw_name[100];
3654 	const struct firmware *usbc_pd_fw;
3655 	struct amdgpu_bo *fw_buf_bo = NULL;
3656 	uint64_t fw_pri_mc_addr;
3657 	void *fw_pri_cpu_addr;
3658 
3659 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3660 		dev_err(adev->dev, "PSP block is not ready yet.\n");
3661 		return -EBUSY;
3662 	}
3663 
3664 	if (!drm_dev_enter(ddev, &idx))
3665 		return -ENODEV;
3666 
3667 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
3668 	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
3669 	if (ret)
3670 		goto fail;
3671 
3672 	/* LFB address which is aligned to 1MB boundary per PSP request */
3673 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3674 				      AMDGPU_GEM_DOMAIN_VRAM |
3675 				      AMDGPU_GEM_DOMAIN_GTT,
3676 				      &fw_buf_bo, &fw_pri_mc_addr,
3677 				      &fw_pri_cpu_addr);
3678 	if (ret)
3679 		goto rel_buf;
3680 
3681 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3682 
3683 	mutex_lock(&adev->psp.mutex);
3684 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3685 	mutex_unlock(&adev->psp.mutex);
3686 
3687 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3688 
3689 rel_buf:
3690 	release_firmware(usbc_pd_fw);
3691 fail:
3692 	if (ret) {
3693 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
3694 		count = ret;
3695 	}
3696 
3697 	drm_dev_exit(idx);
3698 	return count;
3699 }
3700 
3701 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3702 {
3703 	int idx;
3704 
3705 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3706 		return;
3707 
3708 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3709 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
3710 
3711 	drm_dev_exit(idx);
3712 }
3713 
3714 /**
3715  * DOC: usbc_pd_fw
3716  * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3717  * this file will trigger the update process.
3718  */
3719 static DEVICE_ATTR(usbc_pd_fw, 0644,
3720 		   psp_usbc_pd_fw_sysfs_read,
3721 		   psp_usbc_pd_fw_sysfs_write);
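
/*
 * Illustrative usage sketch (not part of the driver): how the usbc_pd_fw
 * node documented above might be exercised from userspace. The sysfs path
 * and the firmware file name are assumptions; the actual card index and the
 * file placed under /lib/firmware/amdgpu depend on the system.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const char *node = "/sys/class/drm/card0/device/usbc_pd_fw";
 *		unsigned int fw_ver;
 *		FILE *f;
 *
 *		// Writing a firmware file name (relative to /lib/firmware/amdgpu,
 *		// no trailing newline) triggers the PD firmware update.
 *		f = fopen(node, "w");
 *		if (!f)
 *			return 1;
 *		fputs("example_usbc_pd.bin", f);	// assumed file name
 *		fclose(f);
 *
 *		// Reading back returns the PD firmware version as hex.
 *		f = fopen(node, "r");
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%x", &fw_ver) == 1)
 *			printf("USBC PD FW version: 0x%x\n", fw_ver);
 *		fclose(f);
 *		return 0;
 *	}
 */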
3722 
3723 int is_psp_fw_valid(struct psp_bin_desc bin)
3724 {
3725 	return bin.size_bytes;
3726 }
3727 
3728 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3729 					struct bin_attribute *bin_attr,
3730 					char *buffer, loff_t pos, size_t count)
3731 {
3732 	struct device *dev = kobj_to_dev(kobj);
3733 	struct drm_device *ddev = dev_get_drvdata(dev);
3734 	struct amdgpu_device *adev = drm_to_adev(ddev);
3735 
3736 	adev->psp.vbflash_done = false;
3737 
3738 	/* Safeguard against memory drain */
3739 	if (pos + count > AMD_VBIOS_FILE_MAX_SIZE_B) {
3740 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
3741 		kvfree(adev->psp.vbflash_tmp_buf);
3742 		adev->psp.vbflash_tmp_buf = NULL;
3743 		adev->psp.vbflash_image_size = 0;
3744 		return -ENOMEM;
3745 	}
3746 
3747 	/* TODO Just allocate max for now and optimize to realloc later if needed */
3748 	if (!adev->psp.vbflash_tmp_buf) {
3749 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3750 		if (!adev->psp.vbflash_tmp_buf)
3751 			return -ENOMEM;
3752 	}
3753 
3754 	mutex_lock(&adev->psp.mutex);
3755 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3756 	adev->psp.vbflash_image_size += count;
3757 	mutex_unlock(&adev->psp.mutex);
3758 
3759 	dev_dbg(adev->dev, "IFWI staged for update\n");
3760 
3761 	return count;
3762 }
3763 
3764 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3765 				       struct bin_attribute *bin_attr, char *buffer,
3766 				       loff_t pos, size_t count)
3767 {
3768 	struct device *dev = kobj_to_dev(kobj);
3769 	struct drm_device *ddev = dev_get_drvdata(dev);
3770 	struct amdgpu_device *adev = drm_to_adev(ddev);
3771 	struct amdgpu_bo *fw_buf_bo = NULL;
3772 	uint64_t fw_pri_mc_addr;
3773 	void *fw_pri_cpu_addr;
3774 	int ret;
3775 
3776 	if (adev->psp.vbflash_image_size == 0)
3777 		return -EINVAL;
3778 
3779 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
3780 
3781 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3782 					AMDGPU_GPU_PAGE_SIZE,
3783 					AMDGPU_GEM_DOMAIN_VRAM,
3784 					&fw_buf_bo,
3785 					&fw_pri_mc_addr,
3786 					&fw_pri_cpu_addr);
3787 	if (ret)
3788 		goto rel_buf;
3789 
3790 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3791 
3792 	mutex_lock(&adev->psp.mutex);
3793 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3794 	mutex_unlock(&adev->psp.mutex);
3795 
3796 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3797 
3798 rel_buf:
3799 	kvfree(adev->psp.vbflash_tmp_buf);
3800 	adev->psp.vbflash_tmp_buf = NULL;
3801 	adev->psp.vbflash_image_size = 0;
3802 
3803 	if (ret) {
3804 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
3805 		return ret;
3806 	}
3807 
3808 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
3809 	return 0;
3810 }
3811 
3812 /**
3813  * DOC: psp_vbflash
3814  * Writing to this file will stage an IFWI for update. Reading from this file
3815  * will trigger the update process.
3816  */
3817 static struct bin_attribute psp_vbflash_bin_attr = {
3818 	.attr = {.name = "psp_vbflash", .mode = 0660},
3819 	.size = 0,
3820 	.write = amdgpu_psp_vbflash_write,
3821 	.read = amdgpu_psp_vbflash_read,
3822 };
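
/*
 * Illustrative usage sketch (not part of the driver): the stage-then-trigger
 * flow documented above for psp_vbflash. The sysfs path and the IFWI file
 * name are assumptions for the example; the real card index and image file
 * depend on the system.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *node = "/sys/class/drm/card0/device/psp_vbflash";
 *		char buf[4096];
 *		ssize_t n;
 *		int in, out;
 *
 *		// Stage the IFWI: copy the image into psp_vbflash.
 *		in = open("ifwi_image.bin", O_RDONLY);	// assumed file name
 *		out = open(node, O_WRONLY);
 *		if (in < 0 || out < 0)
 *			return 1;
 *		while ((n = read(in, buf, sizeof(buf))) > 0)
 *			if (write(out, buf, n) != n)
 *				return 1;
 *		close(in);
 *		close(out);
 *
 *		// Reading the node kicks off the actual SPIROM update.
 *		in = open(node, O_RDONLY);
 *		if (in < 0)
 *			return 1;
 *		read(in, buf, sizeof(buf));
 *		close(in);
 *		return 0;
 *	}
 */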
3823 
3824 /**
3825  * DOC: psp_vbflash_status
3826  * The status of the flash process.
3827  * 0: IFWI flash not complete.
3828  * 1: IFWI flash complete.
3829  */
3830 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3831 					 struct device_attribute *attr,
3832 					 char *buf)
3833 {
3834 	struct drm_device *ddev = dev_get_drvdata(dev);
3835 	struct amdgpu_device *adev = drm_to_adev(ddev);
3836 	uint32_t vbflash_status;
3837 
3838 	vbflash_status = psp_vbflash_status(&adev->psp);
3839 	if (!adev->psp.vbflash_done)
3840 		vbflash_status = 0;
3841 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
3842 		vbflash_status = 1;
3843 
3844 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
3845 }
3846 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
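
/*
 * Illustrative usage sketch (not part of the driver): polling
 * psp_vbflash_status after triggering a flash, per the DOC comment above.
 * The sysfs path is an assumption for the example.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *node = "/sys/class/drm/card0/device/psp_vbflash_status";
 *		unsigned int status = 0;
 *		FILE *f;
 *
 *		// 0: IFWI flash not complete yet, 1: flash complete.
 *		do {
 *			f = fopen(node, "r");
 *			if (!f)
 *				return 1;
 *			if (fscanf(f, "%x", &status) != 1)
 *				status = 0;
 *			fclose(f);
 *			sleep(1);
 *		} while (status != 1);
 *
 *		return 0;
 *	}
 */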
3847 
3848 static struct bin_attribute *bin_flash_attrs[] = {
3849 	&psp_vbflash_bin_attr,
3850 	NULL
3851 };
3852 
3853 static struct attribute *flash_attrs[] = {
3854 	&dev_attr_psp_vbflash_status.attr,
3855 	&dev_attr_usbc_pd_fw.attr,
3856 	NULL
3857 };
3858 
3859 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3860 {
3861 	struct device *dev = kobj_to_dev(kobj);
3862 	struct drm_device *ddev = dev_get_drvdata(dev);
3863 	struct amdgpu_device *adev = drm_to_adev(ddev);
3864 
3865 	if (attr == &dev_attr_usbc_pd_fw.attr)
3866 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
3867 
3868 	return adev->psp.sup_ifwi_up ? 0440 : 0;
3869 }
3870 
3871 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
3872 						struct bin_attribute *attr,
3873 						int idx)
3874 {
3875 	struct device *dev = kobj_to_dev(kobj);
3876 	struct drm_device *ddev = dev_get_drvdata(dev);
3877 	struct amdgpu_device *adev = drm_to_adev(ddev);
3878 
3879 	return adev->psp.sup_ifwi_up ? 0660 : 0;
3880 }
3881 
3882 const struct attribute_group amdgpu_flash_attr_group = {
3883 	.attrs = flash_attrs,
3884 	.bin_attrs = bin_flash_attrs,
3885 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
3886 	.is_visible = amdgpu_flash_attr_is_visible,
3887 };
3888 
3889 const struct amd_ip_funcs psp_ip_funcs = {
3890 	.name = "psp",
3891 	.early_init = psp_early_init,
3892 	.late_init = NULL,
3893 	.sw_init = psp_sw_init,
3894 	.sw_fini = psp_sw_fini,
3895 	.hw_init = psp_hw_init,
3896 	.hw_fini = psp_hw_fini,
3897 	.suspend = psp_suspend,
3898 	.resume = psp_resume,
3899 	.is_idle = NULL,
3900 	.check_soft_reset = NULL,
3901 	.wait_for_idle = NULL,
3902 	.soft_reset = NULL,
3903 	.set_clockgating_state = psp_set_clockgating_state,
3904 	.set_powergating_state = psp_set_powergating_state,
3905 };
3906 
3907 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
3908 	.type = AMD_IP_BLOCK_TYPE_PSP,
3909 	.major = 3,
3910 	.minor = 1,
3911 	.rev = 0,
3912 	.funcs = &psp_ip_funcs,
3913 };
3914 
3915 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
3916 	.type = AMD_IP_BLOCK_TYPE_PSP,
3917 	.major = 10,
3918 	.minor = 0,
3919 	.rev = 0,
3920 	.funcs = &psp_ip_funcs,
3921 };
3922 
3923 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
3924 	.type = AMD_IP_BLOCK_TYPE_PSP,
3925 	.major = 11,
3926 	.minor = 0,
3927 	.rev = 0,
3928 	.funcs = &psp_ip_funcs,
3929 };
3930 
3931 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
3932 	.type = AMD_IP_BLOCK_TYPE_PSP,
3933 	.major = 11,
3934 	.minor = 0,
3935 	.rev = 8,
3936 	.funcs = &psp_ip_funcs,
3937 };
3938 
3939 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
3940 	.type = AMD_IP_BLOCK_TYPE_PSP,
3941 	.major = 12,
3942 	.minor = 0,
3943 	.rev = 0,
3944 	.funcs = &psp_ip_funcs,
3945 };
3946 
3947 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
3948 	.type = AMD_IP_BLOCK_TYPE_PSP,
3949 	.major = 13,
3950 	.minor = 0,
3951 	.rev = 0,
3952 	.funcs = &psp_ip_funcs,
3953 };
3954 
3955 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
3956 	.type = AMD_IP_BLOCK_TYPE_PSP,
3957 	.major = 13,
3958 	.minor = 0,
3959 	.rev = 4,
3960 	.funcs = &psp_ip_funcs,
3961 };
3962