xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision f7543209ce5dc09e3f5a27a7d4ee53e226283719)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46 
47 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*3)
48 
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52 
53 static int psp_ring_init(struct psp_context *psp,
54 			 enum psp_ring_type ring_type)
55 {
56 	int ret = 0;
57 	struct psp_ring *ring;
58 	struct amdgpu_device *adev = psp->adev;
59 
60 	ring = &psp->km_ring;
61 
62 	ring->ring_type = ring_type;
63 
64 	/* allocate 4k Page of Local Frame Buffer memory for ring */
65 	ring->ring_size = 0x1000;
66 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 				      AMDGPU_GEM_DOMAIN_VRAM |
68 				      AMDGPU_GEM_DOMAIN_GTT,
69 				      &adev->firmware.rbuf,
70 				      &ring->ring_mem_mc_addr,
71 				      (void **)&ring->ring_mem);
72 	if (ret) {
73 		ring->ring_size = 0;
74 		return ret;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * Due to DF Cstate management centralized to PMFW, the firmware
82  * loading sequence will be updated as below:
83  *   - Load KDB
84  *   - Load SYS_DRV
85  *   - Load tOS
86  *   - Load PMFW
87  *   - Setup TMR
88  *   - Load other non-psp fw
89  *   - Load ASD
90  *   - Load XGMI/RAS/HDCP/DTM TA if any
91  *
92  * This new sequence is required for
93  *   - Arcturus and onwards
94  */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 	struct amdgpu_device *adev = psp->adev;
98 
99 	if (amdgpu_sriov_vf(adev)) {
100 		psp->pmfw_centralized_cstate_management = false;
101 		return;
102 	}
103 
104 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 	case IP_VERSION(11, 0, 0):
106 	case IP_VERSION(11, 0, 4):
107 	case IP_VERSION(11, 0, 5):
108 	case IP_VERSION(11, 0, 7):
109 	case IP_VERSION(11, 0, 9):
110 	case IP_VERSION(11, 0, 11):
111 	case IP_VERSION(11, 0, 12):
112 	case IP_VERSION(11, 0, 13):
113 	case IP_VERSION(13, 0, 0):
114 	case IP_VERSION(13, 0, 2):
115 	case IP_VERSION(13, 0, 7):
116 		psp->pmfw_centralized_cstate_management = true;
117 		break;
118 	default:
119 		psp->pmfw_centralized_cstate_management = false;
120 		break;
121 	}
122 }
123 
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 	struct amdgpu_device *adev = psp->adev;
127 	char ucode_prefix[30];
128 	int ret = 0;
129 
130 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131 
132 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 	case IP_VERSION(9, 0, 0):
134 	case IP_VERSION(11, 0, 7):
135 	case IP_VERSION(11, 0, 9):
136 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 		break;
139 	case IP_VERSION(13, 0, 2):
140 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 		ret = psp_init_cap_microcode(psp, ucode_prefix);
142 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 		break;
144 	case IP_VERSION(13, 0, 0):
145 		adev->virt.autoload_ucode_id = 0;
146 		break;
147 	case IP_VERSION(13, 0, 6):
148 	case IP_VERSION(13, 0, 14):
149 		ret = psp_init_cap_microcode(psp, ucode_prefix);
150 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 		break;
152 	case IP_VERSION(13, 0, 10):
153 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 		ret = psp_init_cap_microcode(psp, ucode_prefix);
155 		break;
156 	default:
157 		return -EINVAL;
158 	}
159 	return ret;
160 }
161 
162 static int psp_early_init(void *handle)
163 {
164 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
165 	struct psp_context *psp = &adev->psp;
166 
167 	psp->autoload_supported = true;
168 	psp->boot_time_tmr = true;
169 
170 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
171 	case IP_VERSION(9, 0, 0):
172 		psp_v3_1_set_psp_funcs(psp);
173 		psp->autoload_supported = false;
174 		psp->boot_time_tmr = false;
175 		break;
176 	case IP_VERSION(10, 0, 0):
177 	case IP_VERSION(10, 0, 1):
178 		psp_v10_0_set_psp_funcs(psp);
179 		psp->autoload_supported = false;
180 		psp->boot_time_tmr = false;
181 		break;
182 	case IP_VERSION(11, 0, 2):
183 	case IP_VERSION(11, 0, 4):
184 		psp_v11_0_set_psp_funcs(psp);
185 		psp->autoload_supported = false;
186 		psp->boot_time_tmr = false;
187 		break;
188 	case IP_VERSION(11, 0, 0):
189 	case IP_VERSION(11, 0, 7):
190 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
191 		fallthrough;
192 	case IP_VERSION(11, 0, 5):
193 	case IP_VERSION(11, 0, 9):
194 	case IP_VERSION(11, 0, 11):
195 	case IP_VERSION(11, 5, 0):
196 	case IP_VERSION(11, 0, 12):
197 	case IP_VERSION(11, 0, 13):
198 		psp_v11_0_set_psp_funcs(psp);
199 		psp->boot_time_tmr = false;
200 		break;
201 	case IP_VERSION(11, 0, 3):
202 	case IP_VERSION(12, 0, 1):
203 		psp_v12_0_set_psp_funcs(psp);
204 		psp->autoload_supported = false;
205 		psp->boot_time_tmr = false;
206 		break;
207 	case IP_VERSION(13, 0, 2):
208 		psp->boot_time_tmr = false;
209 		fallthrough;
210 	case IP_VERSION(13, 0, 6):
211 	case IP_VERSION(13, 0, 14):
212 		psp_v13_0_set_psp_funcs(psp);
213 		psp->autoload_supported = false;
214 		break;
215 	case IP_VERSION(13, 0, 1):
216 	case IP_VERSION(13, 0, 3):
217 	case IP_VERSION(13, 0, 5):
218 	case IP_VERSION(13, 0, 8):
219 	case IP_VERSION(13, 0, 11):
220 	case IP_VERSION(14, 0, 0):
221 	case IP_VERSION(14, 0, 1):
222 	case IP_VERSION(14, 0, 4):
223 		psp_v13_0_set_psp_funcs(psp);
224 		psp->boot_time_tmr = false;
225 		break;
226 	case IP_VERSION(11, 0, 8):
227 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
228 			psp_v11_0_8_set_psp_funcs(psp);
229 		}
230 		psp->autoload_supported = false;
231 		psp->boot_time_tmr = false;
232 		break;
233 	case IP_VERSION(13, 0, 0):
234 	case IP_VERSION(13, 0, 7):
235 	case IP_VERSION(13, 0, 10):
236 		psp_v13_0_set_psp_funcs(psp);
237 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
238 		psp->boot_time_tmr = false;
239 		break;
240 	case IP_VERSION(13, 0, 4):
241 		psp_v13_0_4_set_psp_funcs(psp);
242 		psp->boot_time_tmr = false;
243 		break;
244 	case IP_VERSION(14, 0, 2):
245 	case IP_VERSION(14, 0, 3):
246 		psp_v14_0_set_psp_funcs(psp);
247 		break;
248 	default:
249 		return -EINVAL;
250 	}
251 
252 	psp->adev = adev;
253 
254 	adev->psp_timeout = 20000;
255 
256 	psp_check_pmfw_centralized_cstate_management(psp);
257 
258 	if (amdgpu_sriov_vf(adev))
259 		return psp_init_sriov_microcode(psp);
260 	else
261 		return psp_init_microcode(psp);
262 }
263 
264 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
265 {
266 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
267 			      &mem_ctx->shared_buf);
268 	mem_ctx->shared_bo = NULL;
269 }
270 
271 static void psp_free_shared_bufs(struct psp_context *psp)
272 {
273 	void *tmr_buf;
274 	void **pptr;
275 
276 	/* free TMR memory buffer */
277 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
278 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
279 	psp->tmr_bo = NULL;
280 
281 	/* free xgmi shared memory */
282 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
283 
284 	/* free ras shared memory */
285 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
286 
287 	/* free hdcp shared memory */
288 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
289 
290 	/* free dtm shared memory */
291 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
292 
293 	/* free rap shared memory */
294 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
295 
296 	/* free securedisplay shared memory */
297 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
298 
299 
300 }
301 
302 static void psp_memory_training_fini(struct psp_context *psp)
303 {
304 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
305 
306 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
307 	kfree(ctx->sys_cache);
308 	ctx->sys_cache = NULL;
309 }
310 
311 static int psp_memory_training_init(struct psp_context *psp)
312 {
313 	int ret;
314 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
315 
316 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
317 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
318 		return 0;
319 	}
320 
321 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
322 	if (ctx->sys_cache == NULL) {
323 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
324 		ret = -ENOMEM;
325 		goto Err_out;
326 	}
327 
328 	dev_dbg(psp->adev->dev,
329 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
330 		ctx->train_data_size,
331 		ctx->p2c_train_data_offset,
332 		ctx->c2p_train_data_offset);
333 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
334 	return 0;
335 
336 Err_out:
337 	psp_memory_training_fini(psp);
338 	return ret;
339 }
340 
341 /*
342  * Helper funciton to query psp runtime database entry
343  *
344  * @adev: amdgpu_device pointer
345  * @entry_type: the type of psp runtime database entry
346  * @db_entry: runtime database entry pointer
347  *
348  * Return false if runtime database doesn't exit or entry is invalid
349  * or true if the specific database entry is found, and copy to @db_entry
350  */
351 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
352 				     enum psp_runtime_entry_type entry_type,
353 				     void *db_entry)
354 {
355 	uint64_t db_header_pos, db_dir_pos;
356 	struct psp_runtime_data_header db_header = {0};
357 	struct psp_runtime_data_directory db_dir = {0};
358 	bool ret = false;
359 	int i;
360 
361 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
362 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
363 		return false;
364 
365 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
366 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
367 
368 	/* read runtime db header from vram */
369 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
370 			sizeof(struct psp_runtime_data_header), false);
371 
372 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
373 		/* runtime db doesn't exist, exit */
374 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
375 		return false;
376 	}
377 
378 	/* read runtime database entry from vram */
379 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
380 			sizeof(struct psp_runtime_data_directory), false);
381 
382 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
383 		/* invalid db entry count, exit */
384 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
385 		return false;
386 	}
387 
388 	/* look up for requested entry type */
389 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
390 		if (db_dir.entry_list[i].entry_type == entry_type) {
391 			switch (entry_type) {
392 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
393 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
394 					/* invalid db entry size */
395 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
396 					return false;
397 				}
398 				/* read runtime database entry */
399 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
400 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
401 				ret = true;
402 				break;
403 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
404 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
405 					/* invalid db entry size */
406 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
407 					return false;
408 				}
409 				/* read runtime database entry */
410 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
411 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
412 				ret = true;
413 				break;
414 			default:
415 				ret = false;
416 				break;
417 			}
418 		}
419 	}
420 
421 	return ret;
422 }
423 
424 static int psp_sw_init(void *handle)
425 {
426 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
427 	struct psp_context *psp = &adev->psp;
428 	int ret;
429 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
430 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
431 	struct psp_runtime_scpm_entry scpm_entry;
432 
433 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
434 	if (!psp->cmd) {
435 		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
436 		ret = -ENOMEM;
437 	}
438 
439 	adev->psp.xgmi_context.supports_extended_data =
440 		!adev->gmc.xgmi.connected_to_cpu &&
441 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
442 
443 	memset(&scpm_entry, 0, sizeof(scpm_entry));
444 	if ((psp_get_runtime_db_entry(adev,
445 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
446 				&scpm_entry)) &&
447 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
448 		adev->scpm_enabled = true;
449 		adev->scpm_status = scpm_entry.scpm_status;
450 	} else {
451 		adev->scpm_enabled = false;
452 		adev->scpm_status = SCPM_DISABLE;
453 	}
454 
455 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
456 
457 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
458 	if (psp_get_runtime_db_entry(adev,
459 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
460 				&boot_cfg_entry)) {
461 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
462 		if ((psp->boot_cfg_bitmask) &
463 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
464 			/* If psp runtime database exists, then
465 			 * only enable two stage memory training
466 			 * when TWO_STAGE_DRAM_TRAINING bit is set
467 			 * in runtime database
468 			 */
469 			mem_training_ctx->enable_mem_training = true;
470 		}
471 
472 	} else {
473 		/* If psp runtime database doesn't exist or is
474 		 * invalid, force enable two stage memory training
475 		 */
476 		mem_training_ctx->enable_mem_training = true;
477 	}
478 
479 	if (mem_training_ctx->enable_mem_training) {
480 		ret = psp_memory_training_init(psp);
481 		if (ret) {
482 			dev_err(adev->dev, "Failed to initialize memory training!\n");
483 			return ret;
484 		}
485 
486 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
487 		if (ret) {
488 			dev_err(adev->dev, "Failed to process memory training!\n");
489 			return ret;
490 		}
491 	}
492 
493 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
494 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
495 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
496 				      &psp->fw_pri_bo,
497 				      &psp->fw_pri_mc_addr,
498 				      &psp->fw_pri_buf);
499 	if (ret)
500 		return ret;
501 
502 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
503 				      AMDGPU_GEM_DOMAIN_VRAM |
504 				      AMDGPU_GEM_DOMAIN_GTT,
505 				      &psp->fence_buf_bo,
506 				      &psp->fence_buf_mc_addr,
507 				      &psp->fence_buf);
508 	if (ret)
509 		goto failed1;
510 
511 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
512 				      AMDGPU_GEM_DOMAIN_VRAM |
513 				      AMDGPU_GEM_DOMAIN_GTT,
514 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
515 				      (void **)&psp->cmd_buf_mem);
516 	if (ret)
517 		goto failed2;
518 
519 	return 0;
520 
521 failed2:
522 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
523 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
524 failed1:
525 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
526 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
527 	return ret;
528 }
529 
530 static int psp_sw_fini(void *handle)
531 {
532 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
533 	struct psp_context *psp = &adev->psp;
534 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
535 
536 	psp_memory_training_fini(psp);
537 
538 	amdgpu_ucode_release(&psp->sos_fw);
539 	amdgpu_ucode_release(&psp->asd_fw);
540 	amdgpu_ucode_release(&psp->ta_fw);
541 	amdgpu_ucode_release(&psp->cap_fw);
542 	amdgpu_ucode_release(&psp->toc_fw);
543 
544 	kfree(cmd);
545 	cmd = NULL;
546 
547 	psp_free_shared_bufs(psp);
548 
549 	if (psp->km_ring.ring_mem)
550 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
551 				      &psp->km_ring.ring_mem_mc_addr,
552 				      (void **)&psp->km_ring.ring_mem);
553 
554 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
555 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
556 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
557 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
558 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
559 			      (void **)&psp->cmd_buf_mem);
560 
561 	return 0;
562 }
563 
564 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
565 		 uint32_t reg_val, uint32_t mask, bool check_changed)
566 {
567 	uint32_t val;
568 	int i;
569 	struct amdgpu_device *adev = psp->adev;
570 
571 	if (psp->adev->no_hw_access)
572 		return 0;
573 
574 	for (i = 0; i < adev->usec_timeout; i++) {
575 		val = RREG32(reg_index);
576 		if (check_changed) {
577 			if (val != reg_val)
578 				return 0;
579 		} else {
580 			if ((val & mask) == reg_val)
581 				return 0;
582 		}
583 		udelay(1);
584 	}
585 
586 	return -ETIME;
587 }
588 
589 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
590 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
591 {
592 	uint32_t val;
593 	int i;
594 	struct amdgpu_device *adev = psp->adev;
595 
596 	if (psp->adev->no_hw_access)
597 		return 0;
598 
599 	for (i = 0; i < msec_timeout; i++) {
600 		val = RREG32(reg_index);
601 		if ((val & mask) == reg_val)
602 			return 0;
603 		msleep(1);
604 	}
605 
606 	return -ETIME;
607 }
608 
609 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
610 {
611 	switch (cmd_id) {
612 	case GFX_CMD_ID_LOAD_TA:
613 		return "LOAD_TA";
614 	case GFX_CMD_ID_UNLOAD_TA:
615 		return "UNLOAD_TA";
616 	case GFX_CMD_ID_INVOKE_CMD:
617 		return "INVOKE_CMD";
618 	case GFX_CMD_ID_LOAD_ASD:
619 		return "LOAD_ASD";
620 	case GFX_CMD_ID_SETUP_TMR:
621 		return "SETUP_TMR";
622 	case GFX_CMD_ID_LOAD_IP_FW:
623 		return "LOAD_IP_FW";
624 	case GFX_CMD_ID_DESTROY_TMR:
625 		return "DESTROY_TMR";
626 	case GFX_CMD_ID_SAVE_RESTORE:
627 		return "SAVE_RESTORE_IP_FW";
628 	case GFX_CMD_ID_SETUP_VMR:
629 		return "SETUP_VMR";
630 	case GFX_CMD_ID_DESTROY_VMR:
631 		return "DESTROY_VMR";
632 	case GFX_CMD_ID_PROG_REG:
633 		return "PROG_REG";
634 	case GFX_CMD_ID_GET_FW_ATTESTATION:
635 		return "GET_FW_ATTESTATION";
636 	case GFX_CMD_ID_LOAD_TOC:
637 		return "ID_LOAD_TOC";
638 	case GFX_CMD_ID_AUTOLOAD_RLC:
639 		return "AUTOLOAD_RLC";
640 	case GFX_CMD_ID_BOOT_CFG:
641 		return "BOOT_CFG";
642 	default:
643 		return "UNKNOWN CMD";
644 	}
645 }
646 
647 static bool psp_err_warn(struct psp_context *psp)
648 {
649 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
650 
651 	/* This response indicates reg list is already loaded */
652 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
653 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
654 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
655 	    cmd->resp.status == TEE_ERROR_CANCEL)
656 		return false;
657 
658 	return true;
659 }
660 
661 static int
662 psp_cmd_submit_buf(struct psp_context *psp,
663 		   struct amdgpu_firmware_info *ucode,
664 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
665 {
666 	int ret;
667 	int index;
668 	int timeout = psp->adev->psp_timeout;
669 	bool ras_intr = false;
670 	bool skip_unsupport = false;
671 
672 	if (psp->adev->no_hw_access)
673 		return 0;
674 
675 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
676 
677 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
678 
679 	index = atomic_inc_return(&psp->fence_value);
680 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
681 	if (ret) {
682 		atomic_dec(&psp->fence_value);
683 		goto exit;
684 	}
685 
686 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
687 	while (*((unsigned int *)psp->fence_buf) != index) {
688 		if (--timeout == 0)
689 			break;
690 		/*
691 		 * Shouldn't wait for timeout when err_event_athub occurs,
692 		 * because gpu reset thread triggered and lock resource should
693 		 * be released for psp resume sequence.
694 		 */
695 		ras_intr = amdgpu_ras_intr_triggered();
696 		if (ras_intr)
697 			break;
698 		usleep_range(10, 100);
699 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
700 	}
701 
702 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
703 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
704 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
705 
706 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
707 
708 	/* In some cases, psp response status is not 0 even there is no
709 	 * problem while the command is submitted. Some version of PSP FW
710 	 * doesn't write 0 to that field.
711 	 * So here we would like to only print a warning instead of an error
712 	 * during psp initialization to avoid breaking hw_init and it doesn't
713 	 * return -EINVAL.
714 	 */
715 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
716 		if (ucode)
717 			dev_warn(psp->adev->dev,
718 				 "failed to load ucode %s(0x%X) ",
719 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
720 		if (psp_err_warn(psp))
721 			dev_warn(
722 				psp->adev->dev,
723 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
724 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
725 				psp->cmd_buf_mem->cmd_id,
726 				psp->cmd_buf_mem->resp.status);
727 		/* If any firmware (including CAP) load fails under SRIOV, it should
728 		 * return failure to stop the VF from initializing.
729 		 * Also return failure in case of timeout
730 		 */
731 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
732 			ret = -EINVAL;
733 			goto exit;
734 		}
735 	}
736 
737 	if (ucode) {
738 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
739 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
740 	}
741 
742 exit:
743 	return ret;
744 }
745 
746 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
747 {
748 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
749 
750 	mutex_lock(&psp->mutex);
751 
752 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
753 
754 	return cmd;
755 }
756 
757 static void release_psp_cmd_buf(struct psp_context *psp)
758 {
759 	mutex_unlock(&psp->mutex);
760 }
761 
762 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
763 				 struct psp_gfx_cmd_resp *cmd,
764 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
765 {
766 	struct amdgpu_device *adev = psp->adev;
767 	uint32_t size = 0;
768 	uint64_t tmr_pa = 0;
769 
770 	if (tmr_bo) {
771 		size = amdgpu_bo_size(tmr_bo);
772 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
773 	}
774 
775 	if (amdgpu_sriov_vf(psp->adev))
776 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
777 	else
778 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
779 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
780 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
781 	cmd->cmd.cmd_setup_tmr.buf_size = size;
782 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
783 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
784 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
785 }
786 
787 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
788 				      uint64_t pri_buf_mc, uint32_t size)
789 {
790 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
791 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
792 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
793 	cmd->cmd.cmd_load_toc.toc_size = size;
794 }
795 
796 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */
797 static int psp_load_toc(struct psp_context *psp,
798 			uint32_t *tmr_size)
799 {
800 	int ret;
801 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
802 
803 	/* Copy toc to psp firmware private buffer */
804 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
805 
806 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
807 
808 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
809 				 psp->fence_buf_mc_addr);
810 	if (!ret)
811 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
812 
813 	release_psp_cmd_buf(psp);
814 
815 	return ret;
816 }
817 
818 /* Set up Trusted Memory Region */
819 static int psp_tmr_init(struct psp_context *psp)
820 {
821 	int ret = 0;
822 	int tmr_size;
823 	void *tmr_buf;
824 	void **pptr;
825 
826 	/*
827 	 * According to HW engineer, they prefer the TMR address be "naturally
828 	 * aligned" , e.g. the start address be an integer divide of TMR size.
829 	 *
830 	 * Note: this memory need be reserved till the driver
831 	 * uninitializes.
832 	 */
833 	tmr_size = PSP_TMR_SIZE(psp->adev);
834 
835 	/* For ASICs support RLC autoload, psp will parse the toc
836 	 * and calculate the total size of TMR needed
837 	 */
838 	if (!amdgpu_sriov_vf(psp->adev) &&
839 	    psp->toc.start_addr &&
840 	    psp->toc.size_bytes &&
841 	    psp->fw_pri_buf) {
842 		ret = psp_load_toc(psp, &tmr_size);
843 		if (ret) {
844 			dev_err(psp->adev->dev, "Failed to load toc\n");
845 			return ret;
846 		}
847 	}
848 
849 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
850 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
851 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
852 					      PSP_TMR_ALIGNMENT,
853 					      AMDGPU_HAS_VRAM(psp->adev) ?
854 					      AMDGPU_GEM_DOMAIN_VRAM :
855 					      AMDGPU_GEM_DOMAIN_GTT,
856 					      &psp->tmr_bo, &psp->tmr_mc_addr,
857 					      pptr);
858 	}
859 
860 	return ret;
861 }
862 
863 static bool psp_skip_tmr(struct psp_context *psp)
864 {
865 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
866 	case IP_VERSION(11, 0, 9):
867 	case IP_VERSION(11, 0, 7):
868 	case IP_VERSION(13, 0, 2):
869 	case IP_VERSION(13, 0, 6):
870 	case IP_VERSION(13, 0, 10):
871 	case IP_VERSION(13, 0, 14):
872 		return true;
873 	default:
874 		return false;
875 	}
876 }
877 
878 static int psp_tmr_load(struct psp_context *psp)
879 {
880 	int ret;
881 	struct psp_gfx_cmd_resp *cmd;
882 
883 	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
884 	 * Already set up by host driver.
885 	 */
886 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
887 		return 0;
888 
889 	cmd = acquire_psp_cmd_buf(psp);
890 
891 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
892 	if (psp->tmr_bo)
893 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
894 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
895 
896 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
897 				 psp->fence_buf_mc_addr);
898 
899 	release_psp_cmd_buf(psp);
900 
901 	return ret;
902 }
903 
904 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
905 					struct psp_gfx_cmd_resp *cmd)
906 {
907 	if (amdgpu_sriov_vf(psp->adev))
908 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
909 	else
910 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
911 }
912 
913 static int psp_tmr_unload(struct psp_context *psp)
914 {
915 	int ret;
916 	struct psp_gfx_cmd_resp *cmd;
917 
918 	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
919 	 * as TMR is not loaded at all
920 	 */
921 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
922 		return 0;
923 
924 	cmd = acquire_psp_cmd_buf(psp);
925 
926 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
927 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
928 
929 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
930 				 psp->fence_buf_mc_addr);
931 
932 	release_psp_cmd_buf(psp);
933 
934 	return ret;
935 }
936 
937 static int psp_tmr_terminate(struct psp_context *psp)
938 {
939 	return psp_tmr_unload(psp);
940 }
941 
942 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
943 					uint64_t *output_ptr)
944 {
945 	int ret;
946 	struct psp_gfx_cmd_resp *cmd;
947 
948 	if (!output_ptr)
949 		return -EINVAL;
950 
951 	if (amdgpu_sriov_vf(psp->adev))
952 		return 0;
953 
954 	cmd = acquire_psp_cmd_buf(psp);
955 
956 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
957 
958 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
959 				 psp->fence_buf_mc_addr);
960 
961 	if (!ret) {
962 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
963 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
964 	}
965 
966 	release_psp_cmd_buf(psp);
967 
968 	return ret;
969 }
970 
971 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
972 {
973 	struct psp_context *psp = &adev->psp;
974 	struct psp_gfx_cmd_resp *cmd;
975 	int ret;
976 
977 	if (amdgpu_sriov_vf(adev))
978 		return 0;
979 
980 	cmd = acquire_psp_cmd_buf(psp);
981 
982 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
983 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
984 
985 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
986 	if (!ret) {
987 		*boot_cfg =
988 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
989 	}
990 
991 	release_psp_cmd_buf(psp);
992 
993 	return ret;
994 }
995 
996 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
997 {
998 	int ret;
999 	struct psp_context *psp = &adev->psp;
1000 	struct psp_gfx_cmd_resp *cmd;
1001 
1002 	if (amdgpu_sriov_vf(adev))
1003 		return 0;
1004 
1005 	cmd = acquire_psp_cmd_buf(psp);
1006 
1007 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1008 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1009 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1010 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1011 
1012 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1013 
1014 	release_psp_cmd_buf(psp);
1015 
1016 	return ret;
1017 }
1018 
1019 static int psp_rl_load(struct amdgpu_device *adev)
1020 {
1021 	int ret;
1022 	struct psp_context *psp = &adev->psp;
1023 	struct psp_gfx_cmd_resp *cmd;
1024 
1025 	if (!is_psp_fw_valid(psp->rl))
1026 		return 0;
1027 
1028 	cmd = acquire_psp_cmd_buf(psp);
1029 
1030 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1031 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1032 
1033 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1034 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1035 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1036 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1037 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1038 
1039 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1040 
1041 	release_psp_cmd_buf(psp);
1042 
1043 	return ret;
1044 }
1045 
1046 int psp_spatial_partition(struct psp_context *psp, int mode)
1047 {
1048 	struct psp_gfx_cmd_resp *cmd;
1049 	int ret;
1050 
1051 	if (amdgpu_sriov_vf(psp->adev))
1052 		return 0;
1053 
1054 	cmd = acquire_psp_cmd_buf(psp);
1055 
1056 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1057 	cmd->cmd.cmd_spatial_part.mode = mode;
1058 
1059 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1060 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1061 
1062 	release_psp_cmd_buf(psp);
1063 
1064 	return ret;
1065 }
1066 
1067 static int psp_asd_initialize(struct psp_context *psp)
1068 {
1069 	int ret;
1070 
1071 	/* If PSP version doesn't match ASD version, asd loading will be failed.
1072 	 * add workaround to bypass it for sriov now.
1073 	 * TODO: add version check to make it common
1074 	 */
1075 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1076 		return 0;
1077 
1078 	/* bypass asd if display hardware is not available */
1079 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1080 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1081 		return 0;
1082 
1083 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1084 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1085 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1086 
1087 	ret = psp_ta_load(psp, &psp->asd_context);
1088 	if (!ret)
1089 		psp->asd_context.initialized = true;
1090 
1091 	return ret;
1092 }
1093 
1094 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1095 				       uint32_t session_id)
1096 {
1097 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1098 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1099 }
1100 
1101 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1102 {
1103 	int ret;
1104 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1105 
1106 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1107 
1108 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1109 
1110 	context->resp_status = cmd->resp.status;
1111 
1112 	release_psp_cmd_buf(psp);
1113 
1114 	return ret;
1115 }
1116 
1117 static int psp_asd_terminate(struct psp_context *psp)
1118 {
1119 	int ret;
1120 
1121 	if (amdgpu_sriov_vf(psp->adev))
1122 		return 0;
1123 
1124 	if (!psp->asd_context.initialized)
1125 		return 0;
1126 
1127 	ret = psp_ta_unload(psp, &psp->asd_context);
1128 	if (!ret)
1129 		psp->asd_context.initialized = false;
1130 
1131 	return ret;
1132 }
1133 
1134 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1135 		uint32_t id, uint32_t value)
1136 {
1137 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1138 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1139 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1140 }
1141 
1142 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1143 		uint32_t value)
1144 {
1145 	struct psp_gfx_cmd_resp *cmd;
1146 	int ret = 0;
1147 
1148 	if (reg >= PSP_REG_LAST)
1149 		return -EINVAL;
1150 
1151 	cmd = acquire_psp_cmd_buf(psp);
1152 
1153 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1154 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1155 	if (ret)
1156 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1157 
1158 	release_psp_cmd_buf(psp);
1159 
1160 	return ret;
1161 }
1162 
1163 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1164 				     uint64_t ta_bin_mc,
1165 				     struct ta_context *context)
1166 {
1167 	cmd->cmd_id				= context->ta_load_type;
1168 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1169 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1170 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1171 
1172 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1173 		lower_32_bits(context->mem_context.shared_mc_addr);
1174 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1175 		upper_32_bits(context->mem_context.shared_mc_addr);
1176 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1177 }
1178 
1179 int psp_ta_init_shared_buf(struct psp_context *psp,
1180 				  struct ta_mem_context *mem_ctx)
1181 {
1182 	/*
1183 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1184 	 * physical) for ta to host memory
1185 	 */
1186 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1187 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1188 				      AMDGPU_GEM_DOMAIN_GTT,
1189 				      &mem_ctx->shared_bo,
1190 				      &mem_ctx->shared_mc_addr,
1191 				      &mem_ctx->shared_buf);
1192 }
1193 
1194 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1195 				       uint32_t ta_cmd_id,
1196 				       uint32_t session_id)
1197 {
1198 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1199 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1200 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1201 }
1202 
1203 int psp_ta_invoke(struct psp_context *psp,
1204 		  uint32_t ta_cmd_id,
1205 		  struct ta_context *context)
1206 {
1207 	int ret;
1208 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1209 
1210 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1211 
1212 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1213 				 psp->fence_buf_mc_addr);
1214 
1215 	context->resp_status = cmd->resp.status;
1216 
1217 	release_psp_cmd_buf(psp);
1218 
1219 	return ret;
1220 }
1221 
1222 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1223 {
1224 	int ret;
1225 	struct psp_gfx_cmd_resp *cmd;
1226 
1227 	cmd = acquire_psp_cmd_buf(psp);
1228 
1229 	psp_copy_fw(psp, context->bin_desc.start_addr,
1230 		    context->bin_desc.size_bytes);
1231 
1232 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1233 
1234 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1235 				 psp->fence_buf_mc_addr);
1236 
1237 	context->resp_status = cmd->resp.status;
1238 
1239 	if (!ret)
1240 		context->session_id = cmd->resp.session_id;
1241 
1242 	release_psp_cmd_buf(psp);
1243 
1244 	return ret;
1245 }
1246 
1247 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1248 {
1249 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1250 }
1251 
1252 int psp_xgmi_terminate(struct psp_context *psp)
1253 {
1254 	int ret;
1255 	struct amdgpu_device *adev = psp->adev;
1256 
1257 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1258 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1259 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1260 	     adev->gmc.xgmi.connected_to_cpu))
1261 		return 0;
1262 
1263 	if (!psp->xgmi_context.context.initialized)
1264 		return 0;
1265 
1266 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1267 
1268 	psp->xgmi_context.context.initialized = false;
1269 
1270 	return ret;
1271 }
1272 
1273 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1274 {
1275 	struct ta_xgmi_shared_memory *xgmi_cmd;
1276 	int ret;
1277 
1278 	if (!psp->ta_fw ||
1279 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1280 	    !psp->xgmi_context.context.bin_desc.start_addr)
1281 		return -ENOENT;
1282 
1283 	if (!load_ta)
1284 		goto invoke;
1285 
1286 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1287 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1288 
1289 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1290 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1291 		if (ret)
1292 			return ret;
1293 	}
1294 
1295 	/* Load XGMI TA */
1296 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1297 	if (!ret)
1298 		psp->xgmi_context.context.initialized = true;
1299 	else
1300 		return ret;
1301 
1302 invoke:
1303 	/* Initialize XGMI session */
1304 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1305 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1306 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1307 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1308 
1309 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1310 	/* note down the capbility flag for XGMI TA */
1311 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1312 
1313 	return ret;
1314 }
1315 
1316 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1317 {
1318 	struct ta_xgmi_shared_memory *xgmi_cmd;
1319 	int ret;
1320 
1321 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1322 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1323 
1324 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1325 
1326 	/* Invoke xgmi ta to get hive id */
1327 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1328 	if (ret)
1329 		return ret;
1330 
1331 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1332 
1333 	return 0;
1334 }
1335 
1336 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1337 {
1338 	struct ta_xgmi_shared_memory *xgmi_cmd;
1339 	int ret;
1340 
1341 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1342 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1343 
1344 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1345 
1346 	/* Invoke xgmi ta to get the node id */
1347 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1348 	if (ret)
1349 		return ret;
1350 
1351 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1352 
1353 	return 0;
1354 }
1355 
1356 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1357 {
1358 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1359 			IP_VERSION(13, 0, 2) &&
1360 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1361 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1362 		       IP_VERSION(13, 0, 6);
1363 }
1364 
1365 /*
1366  * Chips that support extended topology information require the driver to
1367  * reflect topology information in the opposite direction.  This is
1368  * because the TA has already exceeded its link record limit and if the
1369  * TA holds bi-directional information, the driver would have to do
1370  * multiple fetches instead of just two.
1371  */
1372 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1373 					struct psp_xgmi_node_info node_info)
1374 {
1375 	struct amdgpu_device *mirror_adev;
1376 	struct amdgpu_hive_info *hive;
1377 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1378 	uint64_t dst_node_id = node_info.node_id;
1379 	uint8_t dst_num_hops = node_info.num_hops;
1380 	uint8_t dst_num_links = node_info.num_links;
1381 
1382 	hive = amdgpu_get_xgmi_hive(psp->adev);
1383 	if (WARN_ON(!hive))
1384 		return;
1385 
1386 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1387 		struct psp_xgmi_topology_info *mirror_top_info;
1388 		int j;
1389 
1390 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1391 			continue;
1392 
1393 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1394 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1395 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1396 				continue;
1397 
1398 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1399 			/*
1400 			 * prevent 0 num_links value re-reflection since reflection
1401 			 * criteria is based on num_hops (direct or indirect).
1402 			 *
1403 			 */
1404 			if (dst_num_links)
1405 				mirror_top_info->nodes[j].num_links = dst_num_links;
1406 
1407 			break;
1408 		}
1409 
1410 		break;
1411 	}
1412 
1413 	amdgpu_put_xgmi_hive(hive);
1414 }
1415 
1416 int psp_xgmi_get_topology_info(struct psp_context *psp,
1417 			       int number_devices,
1418 			       struct psp_xgmi_topology_info *topology,
1419 			       bool get_extended_data)
1420 {
1421 	struct ta_xgmi_shared_memory *xgmi_cmd;
1422 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1423 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1424 	int i;
1425 	int ret;
1426 
1427 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1428 		return -EINVAL;
1429 
1430 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1431 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1432 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1433 
1434 	/* Fill in the shared memory with topology information as input */
1435 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1436 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1437 	topology_info_input->num_nodes = number_devices;
1438 
1439 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1440 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1441 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1442 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1443 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1444 	}
1445 
1446 	/* Invoke xgmi ta to get the topology information */
1447 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1448 	if (ret)
1449 		return ret;
1450 
1451 	/* Read the output topology information from the shared memory */
1452 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1453 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1454 	for (i = 0; i < topology->num_nodes; i++) {
1455 		/* extended data will either be 0 or equal to non-extended data */
1456 		if (topology_info_output->nodes[i].num_hops)
1457 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1458 
1459 		/* non-extended data gets everything here so no need to update */
1460 		if (!get_extended_data) {
1461 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1462 			topology->nodes[i].is_sharing_enabled =
1463 					topology_info_output->nodes[i].is_sharing_enabled;
1464 			topology->nodes[i].sdma_engine =
1465 					topology_info_output->nodes[i].sdma_engine;
1466 		}
1467 
1468 	}
1469 
1470 	/* Invoke xgmi ta again to get the link information */
1471 	if (psp_xgmi_peer_link_info_supported(psp)) {
1472 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1473 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1474 		bool requires_reflection =
1475 			(psp->xgmi_context.supports_extended_data &&
1476 			 get_extended_data) ||
1477 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1478 				IP_VERSION(13, 0, 6) ||
1479 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1480 				IP_VERSION(13, 0, 14);
1481 		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1482 				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1483 
1484 		/* popluate the shared output buffer rather than the cmd input buffer
1485 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1486 		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
1487 		 * The same requirement for GET_EXTEND_PEER_LINKS command.
1488 		 */
1489 		if (ta_port_num_support) {
1490 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1491 
1492 			for (i = 0; i < topology->num_nodes; i++)
1493 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1494 
1495 			link_extend_info_output->num_nodes = topology->num_nodes;
1496 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1497 		} else {
1498 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1499 
1500 			for (i = 0; i < topology->num_nodes; i++)
1501 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1502 
1503 			link_info_output->num_nodes = topology->num_nodes;
1504 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1505 		}
1506 
1507 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1508 		if (ret)
1509 			return ret;
1510 
1511 		for (i = 0; i < topology->num_nodes; i++) {
1512 			uint8_t node_num_links = ta_port_num_support ?
1513 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1514 			/* accumulate num_links on extended data */
1515 			if (get_extended_data) {
1516 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1517 			} else {
1518 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1519 								topology->nodes[i].num_links : node_num_links;
1520 			}
1521 			/* popluate the connected port num info if supported and available */
1522 			if (ta_port_num_support && topology->nodes[i].num_links) {
1523 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1524 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1525 			}
1526 
1527 			/* reflect the topology information for bi-directionality */
1528 			if (requires_reflection && topology->nodes[i].num_hops)
1529 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1530 		}
1531 	}
1532 
1533 	return 0;
1534 }
1535 
1536 int psp_xgmi_set_topology_info(struct psp_context *psp,
1537 			       int number_devices,
1538 			       struct psp_xgmi_topology_info *topology)
1539 {
1540 	struct ta_xgmi_shared_memory *xgmi_cmd;
1541 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1542 	int i;
1543 
1544 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1545 		return -EINVAL;
1546 
1547 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1548 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1549 
1550 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1551 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1552 	topology_info_input->num_nodes = number_devices;
1553 
1554 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1555 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1556 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1557 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1558 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1559 	}
1560 
1561 	/* Invoke xgmi ta to set topology information */
1562 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1563 }
1564 
1565 // ras begin
1566 static void psp_ras_ta_check_status(struct psp_context *psp)
1567 {
1568 	struct ta_ras_shared_memory *ras_cmd =
1569 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1570 
1571 	switch (ras_cmd->ras_status) {
1572 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1573 		dev_warn(psp->adev->dev,
1574 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1575 		break;
1576 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1577 		dev_warn(psp->adev->dev,
1578 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1579 		break;
1580 	case TA_RAS_STATUS__SUCCESS:
1581 		break;
1582 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1583 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1584 			dev_warn(psp->adev->dev,
1585 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1586 		break;
1587 	default:
1588 		dev_warn(psp->adev->dev,
1589 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1590 		break;
1591 	}
1592 }
1593 
1594 static int psp_ras_send_cmd(struct psp_context *psp,
1595 		enum ras_command cmd_id, void *in, void *out)
1596 {
1597 	struct ta_ras_shared_memory *ras_cmd;
1598 	uint32_t cmd = cmd_id;
1599 	int ret = 0;
1600 
1601 	if (!in)
1602 		return -EINVAL;
1603 
1604 	mutex_lock(&psp->ras_context.mutex);
1605 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1606 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1607 
1608 	switch (cmd) {
1609 	case TA_RAS_COMMAND__ENABLE_FEATURES:
1610 	case TA_RAS_COMMAND__DISABLE_FEATURES:
1611 		memcpy(&ras_cmd->ras_in_message,
1612 			in, sizeof(ras_cmd->ras_in_message));
1613 		break;
1614 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1615 		memcpy(&ras_cmd->ras_in_message.trigger_error,
1616 			in, sizeof(ras_cmd->ras_in_message.trigger_error));
1617 		break;
1618 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1619 		memcpy(&ras_cmd->ras_in_message.address,
1620 			in, sizeof(ras_cmd->ras_in_message.address));
1621 		break;
1622 	default:
1623 		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1624 		ret = -EINVAL;
1625 		goto err_out;
1626 	}
1627 
1628 	ras_cmd->cmd_id = cmd;
1629 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1630 
1631 	switch (cmd) {
1632 	case TA_RAS_COMMAND__TRIGGER_ERROR:
1633 		if (ret || psp->cmd_buf_mem->resp.status)
1634 			ret = -EINVAL;
1635 		else if (out)
1636 			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1637 		break;
1638 	case TA_RAS_COMMAND__QUERY_ADDRESS:
1639 		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1640 			ret = -EINVAL;
1641 		else if (out)
1642 			memcpy(out,
1643 				&ras_cmd->ras_out_message.address,
1644 				sizeof(ras_cmd->ras_out_message.address));
1645 		break;
1646 	default:
1647 		break;
1648 	}
1649 
1650 err_out:
1651 	mutex_unlock(&psp->ras_context.mutex);
1652 
1653 	return ret;
1654 }
1655 
1656 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1657 {
1658 	struct ta_ras_shared_memory *ras_cmd;
1659 	int ret;
1660 
1661 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1662 
1663 	/*
1664 	 * TODO: bypass the loading in sriov for now
1665 	 */
1666 	if (amdgpu_sriov_vf(psp->adev))
1667 		return 0;
1668 
1669 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1670 
1671 	if (amdgpu_ras_intr_triggered())
1672 		return ret;
1673 
1674 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1675 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1676 		return -EINVAL;
1677 	}
1678 
1679 	if (!ret) {
1680 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1681 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1682 
1683 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1684 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1685 			dev_warn(psp->adev->dev,
1686 				 "RAS internal register access blocked\n");
1687 
1688 		psp_ras_ta_check_status(psp);
1689 	}
1690 
1691 	return ret;
1692 }
1693 
1694 int psp_ras_enable_features(struct psp_context *psp,
1695 		union ta_ras_cmd_input *info, bool enable)
1696 {
1697 	enum ras_command cmd_id;
1698 	int ret;
1699 
1700 	if (!psp->ras_context.context.initialized || !info)
1701 		return -EINVAL;
1702 
1703 	cmd_id = enable ?
1704 		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1705 	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1706 	if (ret)
1707 		return -EINVAL;
1708 
1709 	return 0;
1710 }
1711 
1712 int psp_ras_terminate(struct psp_context *psp)
1713 {
1714 	int ret;
1715 
1716 	/*
1717 	 * TODO: bypass the terminate in sriov for now
1718 	 */
1719 	if (amdgpu_sriov_vf(psp->adev))
1720 		return 0;
1721 
1722 	if (!psp->ras_context.context.initialized)
1723 		return 0;
1724 
1725 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1726 
1727 	psp->ras_context.context.initialized = false;
1728 
1729 	mutex_destroy(&psp->ras_context.mutex);
1730 
1731 	return ret;
1732 }
1733 
1734 int psp_ras_initialize(struct psp_context *psp)
1735 {
1736 	int ret;
1737 	uint32_t boot_cfg = 0xFF;
1738 	struct amdgpu_device *adev = psp->adev;
1739 	struct ta_ras_shared_memory *ras_cmd;
1740 
1741 	/*
1742 	 * TODO: bypass the initialize in sriov for now
1743 	 */
1744 	if (amdgpu_sriov_vf(adev))
1745 		return 0;
1746 
1747 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1748 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1749 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1750 		return 0;
1751 	}
1752 
1753 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1754 		/* query GECC enablement status from boot config
1755 		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1756 		 */
1757 		ret = psp_boot_config_get(adev, &boot_cfg);
1758 		if (ret)
1759 			dev_warn(adev->dev, "PSP get boot config failed\n");
1760 
1761 		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1762 			if (!boot_cfg) {
1763 				dev_info(adev->dev, "GECC is disabled\n");
1764 			} else {
1765 				/* disable GECC in next boot cycle if ras is
1766 				 * disabled by module parameter amdgpu_ras_enable
1767 				 * and/or amdgpu_ras_mask, or boot_config_get call
1768 				 * is failed
1769 				 */
1770 				ret = psp_boot_config_set(adev, 0);
1771 				if (ret)
1772 					dev_warn(adev->dev, "PSP set boot config failed\n");
1773 				else
1774 					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1775 			}
1776 		} else {
1777 			if (boot_cfg == 1) {
1778 				dev_info(adev->dev, "GECC is enabled\n");
1779 			} else {
1780 				/* enable GECC in next boot cycle if it is disabled
1781 				 * in boot config, or force enable GECC if failed to
1782 				 * get boot configuration
1783 				 */
1784 				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1785 				if (ret)
1786 					dev_warn(adev->dev, "PSP set boot config failed\n");
1787 				else
1788 					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1789 			}
1790 		}
1791 	}
1792 
1793 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1794 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1795 
1796 	if (!psp->ras_context.context.mem_context.shared_buf) {
1797 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1798 		if (ret)
1799 			return ret;
1800 	}
1801 
1802 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1803 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1804 
1805 	if (amdgpu_ras_is_poison_mode_supported(adev))
1806 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1807 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1808 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1809 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1810 		adev->gfx.xcc_mask;
1811 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1812 
1813 	ret = psp_ta_load(psp, &psp->ras_context.context);
1814 
1815 	if (!ret && !ras_cmd->ras_status) {
1816 		psp->ras_context.context.initialized = true;
1817 		mutex_init(&psp->ras_context.mutex);
1818 	} else {
1819 		if (ras_cmd->ras_status)
1820 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1821 
1822 		/* fail to load RAS TA */
1823 		psp->ras_context.context.initialized = false;
1824 	}
1825 
1826 	return ret;
1827 }
1828 
1829 int psp_ras_trigger_error(struct psp_context *psp,
1830 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1831 {
1832 	struct amdgpu_device *adev = psp->adev;
1833 	int ret;
1834 	uint32_t dev_mask;
1835 	uint32_t ras_status = 0;
1836 
1837 	if (!psp->ras_context.context.initialized || !info)
1838 		return -EINVAL;
1839 
1840 	switch (info->block_id) {
1841 	case TA_RAS_BLOCK__GFX:
1842 		dev_mask = GET_MASK(GC, instance_mask);
1843 		break;
1844 	case TA_RAS_BLOCK__SDMA:
1845 		dev_mask = GET_MASK(SDMA0, instance_mask);
1846 		break;
1847 	case TA_RAS_BLOCK__VCN:
1848 	case TA_RAS_BLOCK__JPEG:
1849 		dev_mask = GET_MASK(VCN, instance_mask);
1850 		break;
1851 	default:
1852 		dev_mask = instance_mask;
1853 		break;
1854 	}
1855 
1856 	/* reuse sub_block_index for backward compatibility */
1857 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1858 	dev_mask &= AMDGPU_RAS_INST_MASK;
1859 	info->sub_block_index |= dev_mask;
1860 
1861 	ret = psp_ras_send_cmd(psp,
1862 			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
1863 	if (ret)
1864 		return -EINVAL;
1865 
1866 	/* If err_event_athub occurs error inject was successful, however
1867 	 *  return status from TA is no long reliable
1868 	 */
1869 	if (amdgpu_ras_intr_triggered())
1870 		return 0;
1871 
1872 	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1873 		return -EACCES;
1874 	else if (ras_status)
1875 		return -EINVAL;
1876 
1877 	return 0;
1878 }
1879 
1880 int psp_ras_query_address(struct psp_context *psp,
1881 			  struct ta_ras_query_address_input *addr_in,
1882 			  struct ta_ras_query_address_output *addr_out)
1883 {
1884 	int ret;
1885 
1886 	if (!psp->ras_context.context.initialized ||
1887 		!addr_in || !addr_out)
1888 		return -EINVAL;
1889 
1890 	ret = psp_ras_send_cmd(psp,
1891 			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
1892 
1893 	return ret;
1894 }
1895 // ras end
1896 
1897 // HDCP start
1898 static int psp_hdcp_initialize(struct psp_context *psp)
1899 {
1900 	int ret;
1901 
1902 	/*
1903 	 * TODO: bypass the initialize in sriov for now
1904 	 */
1905 	if (amdgpu_sriov_vf(psp->adev))
1906 		return 0;
1907 
1908 	/* bypass hdcp initialization if dmu is harvested */
1909 	if (!amdgpu_device_has_display_hardware(psp->adev))
1910 		return 0;
1911 
1912 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1913 	    !psp->hdcp_context.context.bin_desc.start_addr) {
1914 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1915 		return 0;
1916 	}
1917 
1918 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1919 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1920 
1921 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
1922 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1923 		if (ret)
1924 			return ret;
1925 	}
1926 
1927 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
1928 	if (!ret) {
1929 		psp->hdcp_context.context.initialized = true;
1930 		mutex_init(&psp->hdcp_context.mutex);
1931 	}
1932 
1933 	return ret;
1934 }
1935 
1936 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1937 {
1938 	/*
1939 	 * TODO: bypass the loading in sriov for now
1940 	 */
1941 	if (amdgpu_sriov_vf(psp->adev))
1942 		return 0;
1943 
1944 	if (!psp->hdcp_context.context.initialized)
1945 		return 0;
1946 
1947 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1948 }
1949 
1950 static int psp_hdcp_terminate(struct psp_context *psp)
1951 {
1952 	int ret;
1953 
1954 	/*
1955 	 * TODO: bypass the terminate in sriov for now
1956 	 */
1957 	if (amdgpu_sriov_vf(psp->adev))
1958 		return 0;
1959 
1960 	if (!psp->hdcp_context.context.initialized)
1961 		return 0;
1962 
1963 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1964 
1965 	psp->hdcp_context.context.initialized = false;
1966 
1967 	return ret;
1968 }
1969 // HDCP end
1970 
1971 // DTM start
1972 static int psp_dtm_initialize(struct psp_context *psp)
1973 {
1974 	int ret;
1975 
1976 	/*
1977 	 * TODO: bypass the initialize in sriov for now
1978 	 */
1979 	if (amdgpu_sriov_vf(psp->adev))
1980 		return 0;
1981 
1982 	/* bypass dtm initialization if dmu is harvested */
1983 	if (!amdgpu_device_has_display_hardware(psp->adev))
1984 		return 0;
1985 
1986 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
1987 	    !psp->dtm_context.context.bin_desc.start_addr) {
1988 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1989 		return 0;
1990 	}
1991 
1992 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1993 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1994 
1995 	if (!psp->dtm_context.context.mem_context.shared_buf) {
1996 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1997 		if (ret)
1998 			return ret;
1999 	}
2000 
2001 	ret = psp_ta_load(psp, &psp->dtm_context.context);
2002 	if (!ret) {
2003 		psp->dtm_context.context.initialized = true;
2004 		mutex_init(&psp->dtm_context.mutex);
2005 	}
2006 
2007 	return ret;
2008 }
2009 
2010 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2011 {
2012 	/*
2013 	 * TODO: bypass the loading in sriov for now
2014 	 */
2015 	if (amdgpu_sriov_vf(psp->adev))
2016 		return 0;
2017 
2018 	if (!psp->dtm_context.context.initialized)
2019 		return 0;
2020 
2021 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2022 }
2023 
2024 static int psp_dtm_terminate(struct psp_context *psp)
2025 {
2026 	int ret;
2027 
2028 	/*
2029 	 * TODO: bypass the terminate in sriov for now
2030 	 */
2031 	if (amdgpu_sriov_vf(psp->adev))
2032 		return 0;
2033 
2034 	if (!psp->dtm_context.context.initialized)
2035 		return 0;
2036 
2037 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
2038 
2039 	psp->dtm_context.context.initialized = false;
2040 
2041 	return ret;
2042 }
2043 // DTM end
2044 
2045 // RAP start
2046 static int psp_rap_initialize(struct psp_context *psp)
2047 {
2048 	int ret;
2049 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2050 
2051 	/*
2052 	 * TODO: bypass the initialize in sriov for now
2053 	 */
2054 	if (amdgpu_sriov_vf(psp->adev))
2055 		return 0;
2056 
2057 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2058 	    !psp->rap_context.context.bin_desc.start_addr) {
2059 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2060 		return 0;
2061 	}
2062 
2063 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2064 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2065 
2066 	if (!psp->rap_context.context.mem_context.shared_buf) {
2067 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2068 		if (ret)
2069 			return ret;
2070 	}
2071 
2072 	ret = psp_ta_load(psp, &psp->rap_context.context);
2073 	if (!ret) {
2074 		psp->rap_context.context.initialized = true;
2075 		mutex_init(&psp->rap_context.mutex);
2076 	} else
2077 		return ret;
2078 
2079 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2080 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2081 		psp_rap_terminate(psp);
2082 		/* free rap shared memory */
2083 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2084 
2085 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2086 			 ret, status);
2087 
2088 		return ret;
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 static int psp_rap_terminate(struct psp_context *psp)
2095 {
2096 	int ret;
2097 
2098 	if (!psp->rap_context.context.initialized)
2099 		return 0;
2100 
2101 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2102 
2103 	psp->rap_context.context.initialized = false;
2104 
2105 	return ret;
2106 }
2107 
2108 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2109 {
2110 	struct ta_rap_shared_memory *rap_cmd;
2111 	int ret = 0;
2112 
2113 	if (!psp->rap_context.context.initialized)
2114 		return 0;
2115 
2116 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2117 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2118 		return -EINVAL;
2119 
2120 	mutex_lock(&psp->rap_context.mutex);
2121 
2122 	rap_cmd = (struct ta_rap_shared_memory *)
2123 		  psp->rap_context.context.mem_context.shared_buf;
2124 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2125 
2126 	rap_cmd->cmd_id = ta_cmd_id;
2127 	rap_cmd->validation_method_id = METHOD_A;
2128 
2129 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2130 	if (ret)
2131 		goto out_unlock;
2132 
2133 	if (status)
2134 		*status = rap_cmd->rap_status;
2135 
2136 out_unlock:
2137 	mutex_unlock(&psp->rap_context.mutex);
2138 
2139 	return ret;
2140 }
2141 // RAP end
2142 
2143 /* securedisplay start */
2144 static int psp_securedisplay_initialize(struct psp_context *psp)
2145 {
2146 	int ret;
2147 	struct ta_securedisplay_cmd *securedisplay_cmd;
2148 
2149 	/*
2150 	 * TODO: bypass the initialize in sriov for now
2151 	 */
2152 	if (amdgpu_sriov_vf(psp->adev))
2153 		return 0;
2154 
2155 	/* bypass securedisplay initialization if dmu is harvested */
2156 	if (!amdgpu_device_has_display_hardware(psp->adev))
2157 		return 0;
2158 
2159 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2160 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2161 		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2162 		return 0;
2163 	}
2164 
2165 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2166 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2167 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2168 
2169 	if (!psp->securedisplay_context.context.initialized) {
2170 		ret = psp_ta_init_shared_buf(psp,
2171 					     &psp->securedisplay_context.context.mem_context);
2172 		if (ret)
2173 			return ret;
2174 	}
2175 
2176 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2177 	if (!ret) {
2178 		psp->securedisplay_context.context.initialized = true;
2179 		mutex_init(&psp->securedisplay_context.mutex);
2180 	} else
2181 		return ret;
2182 
2183 	mutex_lock(&psp->securedisplay_context.mutex);
2184 
2185 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2186 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2187 
2188 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2189 
2190 	mutex_unlock(&psp->securedisplay_context.mutex);
2191 
2192 	if (ret) {
2193 		psp_securedisplay_terminate(psp);
2194 		/* free securedisplay shared memory */
2195 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2196 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2197 		return -EINVAL;
2198 	}
2199 
2200 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2201 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2202 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2203 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2204 		/* don't try again */
2205 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2206 	}
2207 
2208 	return 0;
2209 }
2210 
2211 static int psp_securedisplay_terminate(struct psp_context *psp)
2212 {
2213 	int ret;
2214 
2215 	/*
2216 	 * TODO:bypass the terminate in sriov for now
2217 	 */
2218 	if (amdgpu_sriov_vf(psp->adev))
2219 		return 0;
2220 
2221 	if (!psp->securedisplay_context.context.initialized)
2222 		return 0;
2223 
2224 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2225 
2226 	psp->securedisplay_context.context.initialized = false;
2227 
2228 	return ret;
2229 }
2230 
2231 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2232 {
2233 	int ret;
2234 
2235 	if (!psp->securedisplay_context.context.initialized)
2236 		return -EINVAL;
2237 
2238 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2239 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2240 		return -EINVAL;
2241 
2242 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2243 
2244 	return ret;
2245 }
2246 /* SECUREDISPLAY end */
2247 
2248 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2249 {
2250 	struct psp_context *psp = &adev->psp;
2251 	int ret = 0;
2252 
2253 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2254 		ret = psp->funcs->wait_for_bootloader(psp);
2255 
2256 	return ret;
2257 }
2258 
2259 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2260 {
2261 	if (psp->funcs &&
2262 	    psp->funcs->get_ras_capability) {
2263 		return psp->funcs->get_ras_capability(psp);
2264 	} else {
2265 		return false;
2266 	}
2267 }
2268 
2269 static int psp_hw_start(struct psp_context *psp)
2270 {
2271 	struct amdgpu_device *adev = psp->adev;
2272 	int ret;
2273 
2274 	if (!amdgpu_sriov_vf(adev)) {
2275 		if ((is_psp_fw_valid(psp->kdb)) &&
2276 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2277 			ret = psp_bootloader_load_kdb(psp);
2278 			if (ret) {
2279 				dev_err(adev->dev, "PSP load kdb failed!\n");
2280 				return ret;
2281 			}
2282 		}
2283 
2284 		if ((is_psp_fw_valid(psp->spl)) &&
2285 		    (psp->funcs->bootloader_load_spl != NULL)) {
2286 			ret = psp_bootloader_load_spl(psp);
2287 			if (ret) {
2288 				dev_err(adev->dev, "PSP load spl failed!\n");
2289 				return ret;
2290 			}
2291 		}
2292 
2293 		if ((is_psp_fw_valid(psp->sys)) &&
2294 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2295 			ret = psp_bootloader_load_sysdrv(psp);
2296 			if (ret) {
2297 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2298 				return ret;
2299 			}
2300 		}
2301 
2302 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2303 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2304 			ret = psp_bootloader_load_soc_drv(psp);
2305 			if (ret) {
2306 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2307 				return ret;
2308 			}
2309 		}
2310 
2311 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2312 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2313 			ret = psp_bootloader_load_intf_drv(psp);
2314 			if (ret) {
2315 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2316 				return ret;
2317 			}
2318 		}
2319 
2320 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2321 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2322 			ret = psp_bootloader_load_dbg_drv(psp);
2323 			if (ret) {
2324 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2325 				return ret;
2326 			}
2327 		}
2328 
2329 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2330 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2331 			ret = psp_bootloader_load_ras_drv(psp);
2332 			if (ret) {
2333 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2334 				return ret;
2335 			}
2336 		}
2337 
2338 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2339 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2340 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2341 			if (ret) {
2342 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2343 				return ret;
2344 			}
2345 		}
2346 
2347 		if ((is_psp_fw_valid(psp->sos)) &&
2348 		    (psp->funcs->bootloader_load_sos != NULL)) {
2349 			ret = psp_bootloader_load_sos(psp);
2350 			if (ret) {
2351 				dev_err(adev->dev, "PSP load sos failed!\n");
2352 				return ret;
2353 			}
2354 		}
2355 	}
2356 
2357 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2358 	if (ret) {
2359 		dev_err(adev->dev, "PSP create ring failed!\n");
2360 		return ret;
2361 	}
2362 
2363 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2364 		goto skip_pin_bo;
2365 
2366 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2367 		ret = psp_tmr_init(psp);
2368 		if (ret) {
2369 			dev_err(adev->dev, "PSP tmr init failed!\n");
2370 			return ret;
2371 		}
2372 	}
2373 
2374 skip_pin_bo:
2375 	/*
2376 	 * For ASICs with DF Cstate management centralized
2377 	 * to PMFW, TMR setup should be performed after PMFW
2378 	 * loaded and before other non-psp firmware loaded.
2379 	 */
2380 	if (psp->pmfw_centralized_cstate_management) {
2381 		ret = psp_load_smu_fw(psp);
2382 		if (ret)
2383 			return ret;
2384 	}
2385 
2386 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2387 		ret = psp_tmr_load(psp);
2388 		if (ret) {
2389 			dev_err(adev->dev, "PSP load tmr failed!\n");
2390 			return ret;
2391 		}
2392 	}
2393 
2394 	return 0;
2395 }
2396 
2397 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2398 			   enum psp_gfx_fw_type *type)
2399 {
2400 	switch (ucode->ucode_id) {
2401 	case AMDGPU_UCODE_ID_CAP:
2402 		*type = GFX_FW_TYPE_CAP;
2403 		break;
2404 	case AMDGPU_UCODE_ID_SDMA0:
2405 		*type = GFX_FW_TYPE_SDMA0;
2406 		break;
2407 	case AMDGPU_UCODE_ID_SDMA1:
2408 		*type = GFX_FW_TYPE_SDMA1;
2409 		break;
2410 	case AMDGPU_UCODE_ID_SDMA2:
2411 		*type = GFX_FW_TYPE_SDMA2;
2412 		break;
2413 	case AMDGPU_UCODE_ID_SDMA3:
2414 		*type = GFX_FW_TYPE_SDMA3;
2415 		break;
2416 	case AMDGPU_UCODE_ID_SDMA4:
2417 		*type = GFX_FW_TYPE_SDMA4;
2418 		break;
2419 	case AMDGPU_UCODE_ID_SDMA5:
2420 		*type = GFX_FW_TYPE_SDMA5;
2421 		break;
2422 	case AMDGPU_UCODE_ID_SDMA6:
2423 		*type = GFX_FW_TYPE_SDMA6;
2424 		break;
2425 	case AMDGPU_UCODE_ID_SDMA7:
2426 		*type = GFX_FW_TYPE_SDMA7;
2427 		break;
2428 	case AMDGPU_UCODE_ID_CP_MES:
2429 		*type = GFX_FW_TYPE_CP_MES;
2430 		break;
2431 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2432 		*type = GFX_FW_TYPE_MES_STACK;
2433 		break;
2434 	case AMDGPU_UCODE_ID_CP_MES1:
2435 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2436 		break;
2437 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2438 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2439 		break;
2440 	case AMDGPU_UCODE_ID_CP_CE:
2441 		*type = GFX_FW_TYPE_CP_CE;
2442 		break;
2443 	case AMDGPU_UCODE_ID_CP_PFP:
2444 		*type = GFX_FW_TYPE_CP_PFP;
2445 		break;
2446 	case AMDGPU_UCODE_ID_CP_ME:
2447 		*type = GFX_FW_TYPE_CP_ME;
2448 		break;
2449 	case AMDGPU_UCODE_ID_CP_MEC1:
2450 		*type = GFX_FW_TYPE_CP_MEC;
2451 		break;
2452 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2453 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2454 		break;
2455 	case AMDGPU_UCODE_ID_CP_MEC2:
2456 		*type = GFX_FW_TYPE_CP_MEC;
2457 		break;
2458 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2459 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2460 		break;
2461 	case AMDGPU_UCODE_ID_RLC_P:
2462 		*type = GFX_FW_TYPE_RLC_P;
2463 		break;
2464 	case AMDGPU_UCODE_ID_RLC_V:
2465 		*type = GFX_FW_TYPE_RLC_V;
2466 		break;
2467 	case AMDGPU_UCODE_ID_RLC_G:
2468 		*type = GFX_FW_TYPE_RLC_G;
2469 		break;
2470 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2471 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2472 		break;
2473 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2474 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2475 		break;
2476 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2477 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2478 		break;
2479 	case AMDGPU_UCODE_ID_RLC_IRAM:
2480 		*type = GFX_FW_TYPE_RLC_IRAM;
2481 		break;
2482 	case AMDGPU_UCODE_ID_RLC_DRAM:
2483 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2484 		break;
2485 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2486 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2487 		break;
2488 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2489 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2490 		break;
2491 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2492 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2493 		break;
2494 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2495 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2496 		break;
2497 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2498 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2499 		break;
2500 	case AMDGPU_UCODE_ID_SMC:
2501 		*type = GFX_FW_TYPE_SMU;
2502 		break;
2503 	case AMDGPU_UCODE_ID_PPTABLE:
2504 		*type = GFX_FW_TYPE_PPTABLE;
2505 		break;
2506 	case AMDGPU_UCODE_ID_UVD:
2507 		*type = GFX_FW_TYPE_UVD;
2508 		break;
2509 	case AMDGPU_UCODE_ID_UVD1:
2510 		*type = GFX_FW_TYPE_UVD1;
2511 		break;
2512 	case AMDGPU_UCODE_ID_VCE:
2513 		*type = GFX_FW_TYPE_VCE;
2514 		break;
2515 	case AMDGPU_UCODE_ID_VCN:
2516 		*type = GFX_FW_TYPE_VCN;
2517 		break;
2518 	case AMDGPU_UCODE_ID_VCN1:
2519 		*type = GFX_FW_TYPE_VCN1;
2520 		break;
2521 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2522 		*type = GFX_FW_TYPE_DMCU_ERAM;
2523 		break;
2524 	case AMDGPU_UCODE_ID_DMCU_INTV:
2525 		*type = GFX_FW_TYPE_DMCU_ISR;
2526 		break;
2527 	case AMDGPU_UCODE_ID_VCN0_RAM:
2528 		*type = GFX_FW_TYPE_VCN0_RAM;
2529 		break;
2530 	case AMDGPU_UCODE_ID_VCN1_RAM:
2531 		*type = GFX_FW_TYPE_VCN1_RAM;
2532 		break;
2533 	case AMDGPU_UCODE_ID_DMCUB:
2534 		*type = GFX_FW_TYPE_DMUB;
2535 		break;
2536 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2537 	case AMDGPU_UCODE_ID_SDMA_RS64:
2538 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2539 		break;
2540 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2541 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2542 		break;
2543 	case AMDGPU_UCODE_ID_IMU_I:
2544 		*type = GFX_FW_TYPE_IMU_I;
2545 		break;
2546 	case AMDGPU_UCODE_ID_IMU_D:
2547 		*type = GFX_FW_TYPE_IMU_D;
2548 		break;
2549 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2550 		*type = GFX_FW_TYPE_RS64_PFP;
2551 		break;
2552 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2553 		*type = GFX_FW_TYPE_RS64_ME;
2554 		break;
2555 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2556 		*type = GFX_FW_TYPE_RS64_MEC;
2557 		break;
2558 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2559 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2560 		break;
2561 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2562 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2563 		break;
2564 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2565 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2566 		break;
2567 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2568 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2569 		break;
2570 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2571 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2572 		break;
2573 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2574 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2575 		break;
2576 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2577 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2578 		break;
2579 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2580 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2581 		break;
2582 	case AMDGPU_UCODE_ID_VPE_CTX:
2583 		*type = GFX_FW_TYPE_VPEC_FW1;
2584 		break;
2585 	case AMDGPU_UCODE_ID_VPE_CTL:
2586 		*type = GFX_FW_TYPE_VPEC_FW2;
2587 		break;
2588 	case AMDGPU_UCODE_ID_VPE:
2589 		*type = GFX_FW_TYPE_VPE;
2590 		break;
2591 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2592 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2593 		break;
2594 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2595 		*type = GFX_FW_TYPE_UMSCH_DATA;
2596 		break;
2597 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2598 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2599 		break;
2600 	case AMDGPU_UCODE_ID_P2S_TABLE:
2601 		*type = GFX_FW_TYPE_P2S_TABLE;
2602 		break;
2603 	case AMDGPU_UCODE_ID_JPEG_RAM:
2604 		*type = GFX_FW_TYPE_JPEG_RAM;
2605 		break;
2606 	case AMDGPU_UCODE_ID_ISP:
2607 		*type = GFX_FW_TYPE_ISP;
2608 		break;
2609 	case AMDGPU_UCODE_ID_MAXIMUM:
2610 	default:
2611 		return -EINVAL;
2612 	}
2613 
2614 	return 0;
2615 }
2616 
2617 static void psp_print_fw_hdr(struct psp_context *psp,
2618 			     struct amdgpu_firmware_info *ucode)
2619 {
2620 	struct amdgpu_device *adev = psp->adev;
2621 	struct common_firmware_header *hdr;
2622 
2623 	switch (ucode->ucode_id) {
2624 	case AMDGPU_UCODE_ID_SDMA0:
2625 	case AMDGPU_UCODE_ID_SDMA1:
2626 	case AMDGPU_UCODE_ID_SDMA2:
2627 	case AMDGPU_UCODE_ID_SDMA3:
2628 	case AMDGPU_UCODE_ID_SDMA4:
2629 	case AMDGPU_UCODE_ID_SDMA5:
2630 	case AMDGPU_UCODE_ID_SDMA6:
2631 	case AMDGPU_UCODE_ID_SDMA7:
2632 		hdr = (struct common_firmware_header *)
2633 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2634 		amdgpu_ucode_print_sdma_hdr(hdr);
2635 		break;
2636 	case AMDGPU_UCODE_ID_CP_CE:
2637 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2638 		amdgpu_ucode_print_gfx_hdr(hdr);
2639 		break;
2640 	case AMDGPU_UCODE_ID_CP_PFP:
2641 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2642 		amdgpu_ucode_print_gfx_hdr(hdr);
2643 		break;
2644 	case AMDGPU_UCODE_ID_CP_ME:
2645 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2646 		amdgpu_ucode_print_gfx_hdr(hdr);
2647 		break;
2648 	case AMDGPU_UCODE_ID_CP_MEC1:
2649 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2650 		amdgpu_ucode_print_gfx_hdr(hdr);
2651 		break;
2652 	case AMDGPU_UCODE_ID_RLC_G:
2653 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2654 		amdgpu_ucode_print_rlc_hdr(hdr);
2655 		break;
2656 	case AMDGPU_UCODE_ID_SMC:
2657 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2658 		amdgpu_ucode_print_smc_hdr(hdr);
2659 		break;
2660 	default:
2661 		break;
2662 	}
2663 }
2664 
2665 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2666 				       struct amdgpu_firmware_info *ucode,
2667 				       struct psp_gfx_cmd_resp *cmd)
2668 {
2669 	int ret;
2670 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2671 
2672 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2673 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2674 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2675 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2676 
2677 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2678 	if (ret)
2679 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2680 
2681 	return ret;
2682 }
2683 
2684 int psp_execute_ip_fw_load(struct psp_context *psp,
2685 			   struct amdgpu_firmware_info *ucode)
2686 {
2687 	int ret = 0;
2688 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2689 
2690 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2691 	if (!ret) {
2692 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2693 					 psp->fence_buf_mc_addr);
2694 	}
2695 
2696 	release_psp_cmd_buf(psp);
2697 
2698 	return ret;
2699 }
2700 
2701 static int psp_load_p2s_table(struct psp_context *psp)
2702 {
2703 	int ret;
2704 	struct amdgpu_device *adev = psp->adev;
2705 	struct amdgpu_firmware_info *ucode =
2706 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2707 
2708 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2709 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2710 		return 0;
2711 
2712 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2713 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2714 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2715 								0x0036003C;
2716 		if (psp->sos.fw_version < supp_vers)
2717 			return 0;
2718 	}
2719 
2720 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2721 		return 0;
2722 
2723 	ret = psp_execute_ip_fw_load(psp, ucode);
2724 
2725 	return ret;
2726 }
2727 
2728 static int psp_load_smu_fw(struct psp_context *psp)
2729 {
2730 	int ret;
2731 	struct amdgpu_device *adev = psp->adev;
2732 	struct amdgpu_firmware_info *ucode =
2733 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2734 	struct amdgpu_ras *ras = psp->ras_context.ras;
2735 
2736 	/*
2737 	 * Skip SMU FW reloading in case of using BACO for runpm only,
2738 	 * as SMU is always alive.
2739 	 */
2740 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2741 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2742 		return 0;
2743 
2744 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2745 		return 0;
2746 
2747 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2748 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2749 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2750 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2751 		if (ret)
2752 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2753 	}
2754 
2755 	ret = psp_execute_ip_fw_load(psp, ucode);
2756 
2757 	if (ret)
2758 		dev_err(adev->dev, "PSP load smu failed!\n");
2759 
2760 	return ret;
2761 }
2762 
2763 static bool fw_load_skip_check(struct psp_context *psp,
2764 			       struct amdgpu_firmware_info *ucode)
2765 {
2766 	if (!ucode->fw || !ucode->ucode_size)
2767 		return true;
2768 
2769 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2770 		return true;
2771 
2772 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2773 	    (psp_smu_reload_quirk(psp) ||
2774 	     psp->autoload_supported ||
2775 	     psp->pmfw_centralized_cstate_management))
2776 		return true;
2777 
2778 	if (amdgpu_sriov_vf(psp->adev) &&
2779 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2780 		return true;
2781 
2782 	if (psp->autoload_supported &&
2783 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2784 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2785 		/* skip mec JT when autoload is enabled */
2786 		return true;
2787 
2788 	return false;
2789 }
2790 
2791 int psp_load_fw_list(struct psp_context *psp,
2792 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
2793 {
2794 	int ret = 0, i;
2795 	struct amdgpu_firmware_info *ucode;
2796 
2797 	for (i = 0; i < ucode_count; ++i) {
2798 		ucode = ucode_list[i];
2799 		psp_print_fw_hdr(psp, ucode);
2800 		ret = psp_execute_ip_fw_load(psp, ucode);
2801 		if (ret)
2802 			return ret;
2803 	}
2804 	return ret;
2805 }
2806 
2807 static int psp_load_non_psp_fw(struct psp_context *psp)
2808 {
2809 	int i, ret;
2810 	struct amdgpu_firmware_info *ucode;
2811 	struct amdgpu_device *adev = psp->adev;
2812 
2813 	if (psp->autoload_supported &&
2814 	    !psp->pmfw_centralized_cstate_management) {
2815 		ret = psp_load_smu_fw(psp);
2816 		if (ret)
2817 			return ret;
2818 	}
2819 
2820 	/* Load P2S table first if it's available */
2821 	psp_load_p2s_table(psp);
2822 
2823 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
2824 		ucode = &adev->firmware.ucode[i];
2825 
2826 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2827 		    !fw_load_skip_check(psp, ucode)) {
2828 			ret = psp_load_smu_fw(psp);
2829 			if (ret)
2830 				return ret;
2831 			continue;
2832 		}
2833 
2834 		if (fw_load_skip_check(psp, ucode))
2835 			continue;
2836 
2837 		if (psp->autoload_supported &&
2838 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2839 			     IP_VERSION(11, 0, 7) ||
2840 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2841 			     IP_VERSION(11, 0, 11) ||
2842 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2843 			     IP_VERSION(11, 0, 12)) &&
2844 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2845 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2846 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2847 			/* PSP only receive one SDMA fw for sienna_cichlid,
2848 			 * as all four sdma fw are same
2849 			 */
2850 			continue;
2851 
2852 		psp_print_fw_hdr(psp, ucode);
2853 
2854 		ret = psp_execute_ip_fw_load(psp, ucode);
2855 		if (ret)
2856 			return ret;
2857 
2858 		/* Start rlc autoload after psp recieved all the gfx firmware */
2859 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2860 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2861 			ret = psp_rlc_autoload_start(psp);
2862 			if (ret) {
2863 				dev_err(adev->dev, "Failed to start rlc autoload\n");
2864 				return ret;
2865 			}
2866 		}
2867 	}
2868 
2869 	return 0;
2870 }
2871 
2872 static int psp_load_fw(struct amdgpu_device *adev)
2873 {
2874 	int ret;
2875 	struct psp_context *psp = &adev->psp;
2876 
2877 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2878 		/* should not destroy ring, only stop */
2879 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
2880 	} else {
2881 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2882 
2883 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2884 		if (ret) {
2885 			dev_err(adev->dev, "PSP ring init failed!\n");
2886 			goto failed;
2887 		}
2888 	}
2889 
2890 	ret = psp_hw_start(psp);
2891 	if (ret)
2892 		goto failed;
2893 
2894 	ret = psp_load_non_psp_fw(psp);
2895 	if (ret)
2896 		goto failed1;
2897 
2898 	ret = psp_asd_initialize(psp);
2899 	if (ret) {
2900 		dev_err(adev->dev, "PSP load asd failed!\n");
2901 		goto failed1;
2902 	}
2903 
2904 	ret = psp_rl_load(adev);
2905 	if (ret) {
2906 		dev_err(adev->dev, "PSP load RL failed!\n");
2907 		goto failed1;
2908 	}
2909 
2910 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2911 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
2912 			ret = psp_xgmi_initialize(psp, false, true);
2913 			/* Warning the XGMI seesion initialize failure
2914 			 * Instead of stop driver initialization
2915 			 */
2916 			if (ret)
2917 				dev_err(psp->adev->dev,
2918 					"XGMI: Failed to initialize XGMI session\n");
2919 		}
2920 	}
2921 
2922 	if (psp->ta_fw) {
2923 		ret = psp_ras_initialize(psp);
2924 		if (ret)
2925 			dev_err(psp->adev->dev,
2926 				"RAS: Failed to initialize RAS\n");
2927 
2928 		ret = psp_hdcp_initialize(psp);
2929 		if (ret)
2930 			dev_err(psp->adev->dev,
2931 				"HDCP: Failed to initialize HDCP\n");
2932 
2933 		ret = psp_dtm_initialize(psp);
2934 		if (ret)
2935 			dev_err(psp->adev->dev,
2936 				"DTM: Failed to initialize DTM\n");
2937 
2938 		ret = psp_rap_initialize(psp);
2939 		if (ret)
2940 			dev_err(psp->adev->dev,
2941 				"RAP: Failed to initialize RAP\n");
2942 
2943 		ret = psp_securedisplay_initialize(psp);
2944 		if (ret)
2945 			dev_err(psp->adev->dev,
2946 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2947 	}
2948 
2949 	return 0;
2950 
2951 failed1:
2952 	psp_free_shared_bufs(psp);
2953 failed:
2954 	/*
2955 	 * all cleanup jobs (xgmi terminate, ras terminate,
2956 	 * ring destroy, cmd/fence/fw buffers destory,
2957 	 * psp->cmd destory) are delayed to psp_hw_fini
2958 	 */
2959 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2960 	return ret;
2961 }
2962 
2963 static int psp_hw_init(void *handle)
2964 {
2965 	int ret;
2966 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2967 
2968 	mutex_lock(&adev->firmware.mutex);
2969 	/*
2970 	 * This sequence is just used on hw_init only once, no need on
2971 	 * resume.
2972 	 */
2973 	ret = amdgpu_ucode_init_bo(adev);
2974 	if (ret)
2975 		goto failed;
2976 
2977 	ret = psp_load_fw(adev);
2978 	if (ret) {
2979 		dev_err(adev->dev, "PSP firmware loading failed\n");
2980 		goto failed;
2981 	}
2982 
2983 	mutex_unlock(&adev->firmware.mutex);
2984 	return 0;
2985 
2986 failed:
2987 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2988 	mutex_unlock(&adev->firmware.mutex);
2989 	return -EINVAL;
2990 }
2991 
2992 static int psp_hw_fini(void *handle)
2993 {
2994 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2995 	struct psp_context *psp = &adev->psp;
2996 
2997 	if (psp->ta_fw) {
2998 		psp_ras_terminate(psp);
2999 		psp_securedisplay_terminate(psp);
3000 		psp_rap_terminate(psp);
3001 		psp_dtm_terminate(psp);
3002 		psp_hdcp_terminate(psp);
3003 
3004 		if (adev->gmc.xgmi.num_physical_nodes > 1)
3005 			psp_xgmi_terminate(psp);
3006 	}
3007 
3008 	psp_asd_terminate(psp);
3009 	psp_tmr_terminate(psp);
3010 
3011 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3012 
3013 	return 0;
3014 }
3015 
3016 static int psp_suspend(void *handle)
3017 {
3018 	int ret = 0;
3019 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3020 	struct psp_context *psp = &adev->psp;
3021 
3022 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3023 	    psp->xgmi_context.context.initialized) {
3024 		ret = psp_xgmi_terminate(psp);
3025 		if (ret) {
3026 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3027 			goto out;
3028 		}
3029 	}
3030 
3031 	if (psp->ta_fw) {
3032 		ret = psp_ras_terminate(psp);
3033 		if (ret) {
3034 			dev_err(adev->dev, "Failed to terminate ras ta\n");
3035 			goto out;
3036 		}
3037 		ret = psp_hdcp_terminate(psp);
3038 		if (ret) {
3039 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3040 			goto out;
3041 		}
3042 		ret = psp_dtm_terminate(psp);
3043 		if (ret) {
3044 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3045 			goto out;
3046 		}
3047 		ret = psp_rap_terminate(psp);
3048 		if (ret) {
3049 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3050 			goto out;
3051 		}
3052 		ret = psp_securedisplay_terminate(psp);
3053 		if (ret) {
3054 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3055 			goto out;
3056 		}
3057 	}
3058 
3059 	ret = psp_asd_terminate(psp);
3060 	if (ret) {
3061 		dev_err(adev->dev, "Failed to terminate asd\n");
3062 		goto out;
3063 	}
3064 
3065 	ret = psp_tmr_terminate(psp);
3066 	if (ret) {
3067 		dev_err(adev->dev, "Failed to terminate tmr\n");
3068 		goto out;
3069 	}
3070 
3071 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3072 	if (ret)
3073 		dev_err(adev->dev, "PSP ring stop failed\n");
3074 
3075 out:
3076 	return ret;
3077 }
3078 
3079 static int psp_resume(void *handle)
3080 {
3081 	int ret;
3082 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3083 	struct psp_context *psp = &adev->psp;
3084 
3085 	dev_info(adev->dev, "PSP is resuming...\n");
3086 
3087 	if (psp->mem_train_ctx.enable_mem_training) {
3088 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3089 		if (ret) {
3090 			dev_err(adev->dev, "Failed to process memory training!\n");
3091 			return ret;
3092 		}
3093 	}
3094 
3095 	mutex_lock(&adev->firmware.mutex);
3096 
3097 	ret = psp_hw_start(psp);
3098 	if (ret)
3099 		goto failed;
3100 
3101 	ret = psp_load_non_psp_fw(psp);
3102 	if (ret)
3103 		goto failed;
3104 
3105 	ret = psp_asd_initialize(psp);
3106 	if (ret) {
3107 		dev_err(adev->dev, "PSP load asd failed!\n");
3108 		goto failed;
3109 	}
3110 
3111 	ret = psp_rl_load(adev);
3112 	if (ret) {
3113 		dev_err(adev->dev, "PSP load RL failed!\n");
3114 		goto failed;
3115 	}
3116 
3117 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3118 		ret = psp_xgmi_initialize(psp, false, true);
3119 		/* Warning the XGMI seesion initialize failure
3120 		 * Instead of stop driver initialization
3121 		 */
3122 		if (ret)
3123 			dev_err(psp->adev->dev,
3124 				"XGMI: Failed to initialize XGMI session\n");
3125 	}
3126 
3127 	if (psp->ta_fw) {
3128 		ret = psp_ras_initialize(psp);
3129 		if (ret)
3130 			dev_err(psp->adev->dev,
3131 				"RAS: Failed to initialize RAS\n");
3132 
3133 		ret = psp_hdcp_initialize(psp);
3134 		if (ret)
3135 			dev_err(psp->adev->dev,
3136 				"HDCP: Failed to initialize HDCP\n");
3137 
3138 		ret = psp_dtm_initialize(psp);
3139 		if (ret)
3140 			dev_err(psp->adev->dev,
3141 				"DTM: Failed to initialize DTM\n");
3142 
3143 		ret = psp_rap_initialize(psp);
3144 		if (ret)
3145 			dev_err(psp->adev->dev,
3146 				"RAP: Failed to initialize RAP\n");
3147 
3148 		ret = psp_securedisplay_initialize(psp);
3149 		if (ret)
3150 			dev_err(psp->adev->dev,
3151 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3152 	}
3153 
3154 	mutex_unlock(&adev->firmware.mutex);
3155 
3156 	return 0;
3157 
3158 failed:
3159 	dev_err(adev->dev, "PSP resume failed\n");
3160 	mutex_unlock(&adev->firmware.mutex);
3161 	return ret;
3162 }
3163 
3164 int psp_gpu_reset(struct amdgpu_device *adev)
3165 {
3166 	int ret;
3167 
3168 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3169 		return 0;
3170 
3171 	mutex_lock(&adev->psp.mutex);
3172 	ret = psp_mode1_reset(&adev->psp);
3173 	mutex_unlock(&adev->psp.mutex);
3174 
3175 	return ret;
3176 }
3177 
3178 int psp_rlc_autoload_start(struct psp_context *psp)
3179 {
3180 	int ret;
3181 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3182 
3183 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3184 
3185 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3186 				 psp->fence_buf_mc_addr);
3187 
3188 	release_psp_cmd_buf(psp);
3189 
3190 	return ret;
3191 }
3192 
3193 int psp_ring_cmd_submit(struct psp_context *psp,
3194 			uint64_t cmd_buf_mc_addr,
3195 			uint64_t fence_mc_addr,
3196 			int index)
3197 {
3198 	unsigned int psp_write_ptr_reg = 0;
3199 	struct psp_gfx_rb_frame *write_frame;
3200 	struct psp_ring *ring = &psp->km_ring;
3201 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3202 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3203 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3204 	struct amdgpu_device *adev = psp->adev;
3205 	uint32_t ring_size_dw = ring->ring_size / 4;
3206 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3207 
3208 	/* KM (GPCOM) prepare write pointer */
3209 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3210 
3211 	/* Update KM RB frame pointer to new frame */
3212 	/* write_frame ptr increments by size of rb_frame in bytes */
3213 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3214 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3215 		write_frame = ring_buffer_start;
3216 	else
3217 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3218 	/* Check invalid write_frame ptr address */
3219 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3220 		dev_err(adev->dev,
3221 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3222 			ring_buffer_start, ring_buffer_end, write_frame);
3223 		dev_err(adev->dev,
3224 			"write_frame is pointing to address out of bounds\n");
3225 		return -EINVAL;
3226 	}
3227 
3228 	/* Initialize KM RB frame */
3229 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3230 
3231 	/* Update KM RB frame */
3232 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3233 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3234 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3235 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3236 	write_frame->fence_value = index;
3237 	amdgpu_device_flush_hdp(adev, NULL);
3238 
3239 	/* Update the write Pointer in DWORDs */
3240 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3241 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3242 	return 0;
3243 }
3244 
3245 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3246 {
3247 	struct amdgpu_device *adev = psp->adev;
3248 	const struct psp_firmware_header_v1_0 *asd_hdr;
3249 	int err = 0;
3250 
3251 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
3252 	if (err)
3253 		goto out;
3254 
3255 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3256 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3257 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3258 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3259 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3260 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3261 	return 0;
3262 out:
3263 	amdgpu_ucode_release(&adev->psp.asd_fw);
3264 	return err;
3265 }
3266 
3267 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3268 {
3269 	struct amdgpu_device *adev = psp->adev;
3270 	const struct psp_firmware_header_v1_0 *toc_hdr;
3271 	int err = 0;
3272 
3273 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
3274 	if (err)
3275 		goto out;
3276 
3277 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3278 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3279 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3280 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3281 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3282 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3283 	return 0;
3284 out:
3285 	amdgpu_ucode_release(&adev->psp.toc_fw);
3286 	return err;
3287 }
3288 
3289 static int parse_sos_bin_descriptor(struct psp_context *psp,
3290 				   const struct psp_fw_bin_desc *desc,
3291 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3292 {
3293 	uint8_t *ucode_start_addr  = NULL;
3294 
3295 	if (!psp || !desc || !sos_hdr)
3296 		return -EINVAL;
3297 
3298 	ucode_start_addr  = (uint8_t *)sos_hdr +
3299 			    le32_to_cpu(desc->offset_bytes) +
3300 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3301 
3302 	switch (desc->fw_type) {
3303 	case PSP_FW_TYPE_PSP_SOS:
3304 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3305 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3306 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3307 		psp->sos.start_addr	   = ucode_start_addr;
3308 		break;
3309 	case PSP_FW_TYPE_PSP_SYS_DRV:
3310 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3311 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3312 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3313 		psp->sys.start_addr        = ucode_start_addr;
3314 		break;
3315 	case PSP_FW_TYPE_PSP_KDB:
3316 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3317 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3318 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3319 		psp->kdb.start_addr        = ucode_start_addr;
3320 		break;
3321 	case PSP_FW_TYPE_PSP_TOC:
3322 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3323 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3324 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3325 		psp->toc.start_addr        = ucode_start_addr;
3326 		break;
3327 	case PSP_FW_TYPE_PSP_SPL:
3328 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3329 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3330 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3331 		psp->spl.start_addr        = ucode_start_addr;
3332 		break;
3333 	case PSP_FW_TYPE_PSP_RL:
3334 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3335 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3336 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3337 		psp->rl.start_addr         = ucode_start_addr;
3338 		break;
3339 	case PSP_FW_TYPE_PSP_SOC_DRV:
3340 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3341 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3342 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3343 		psp->soc_drv.start_addr         = ucode_start_addr;
3344 		break;
3345 	case PSP_FW_TYPE_PSP_INTF_DRV:
3346 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3347 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3348 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3349 		psp->intf_drv.start_addr        = ucode_start_addr;
3350 		break;
3351 	case PSP_FW_TYPE_PSP_DBG_DRV:
3352 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3353 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3354 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3355 		psp->dbg_drv.start_addr         = ucode_start_addr;
3356 		break;
3357 	case PSP_FW_TYPE_PSP_RAS_DRV:
3358 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3359 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3360 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3361 		psp->ras_drv.start_addr         = ucode_start_addr;
3362 		break;
3363 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3364 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3365 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3366 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3367 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3368 		break;
3369 	default:
3370 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3371 		break;
3372 	}
3373 
3374 	return 0;
3375 }
3376 
3377 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3378 {
3379 	const struct psp_firmware_header_v1_0 *sos_hdr;
3380 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3381 	uint8_t *ucode_array_start_addr;
3382 
3383 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3384 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3385 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3386 
3387 	if (adev->gmc.xgmi.connected_to_cpu ||
3388 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3389 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3390 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3391 
3392 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3393 		adev->psp.sys.start_addr = ucode_array_start_addr;
3394 
3395 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3396 		adev->psp.sos.start_addr = ucode_array_start_addr +
3397 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3398 	} else {
3399 		/* Load alternate PSP SOS FW */
3400 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3401 
3402 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3403 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3404 
3405 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3406 		adev->psp.sys.start_addr = ucode_array_start_addr +
3407 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3408 
3409 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3410 		adev->psp.sos.start_addr = ucode_array_start_addr +
3411 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3412 	}
3413 
3414 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3415 		dev_warn(adev->dev, "PSP SOS FW not available");
3416 		return -EINVAL;
3417 	}
3418 
3419 	return 0;
3420 }
3421 
3422 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3423 {
3424 	struct amdgpu_device *adev = psp->adev;
3425 	const struct psp_firmware_header_v1_0 *sos_hdr;
3426 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3427 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3428 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3429 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3430 	int err = 0;
3431 	uint8_t *ucode_array_start_addr;
3432 	int fw_index = 0;
3433 
3434 	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
3435 	if (err)
3436 		goto out;
3437 
3438 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3439 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3440 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3441 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3442 
3443 	switch (sos_hdr->header.header_version_major) {
3444 	case 1:
3445 		err = psp_init_sos_base_fw(adev);
3446 		if (err)
3447 			goto out;
3448 
3449 		if (sos_hdr->header.header_version_minor == 1) {
3450 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3451 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3452 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3453 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3454 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3455 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3456 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3457 		}
3458 		if (sos_hdr->header.header_version_minor == 2) {
3459 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3460 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3461 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3462 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3463 		}
3464 		if (sos_hdr->header.header_version_minor == 3) {
3465 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3466 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3467 			adev->psp.toc.start_addr = ucode_array_start_addr +
3468 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3469 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3470 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3471 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3472 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3473 			adev->psp.spl.start_addr = ucode_array_start_addr +
3474 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3475 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3476 			adev->psp.rl.start_addr = ucode_array_start_addr +
3477 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3478 		}
3479 		break;
3480 	case 2:
3481 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3482 
3483 		if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3484 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3485 			err = -EINVAL;
3486 			goto out;
3487 		}
3488 
3489 		for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3490 			err = parse_sos_bin_descriptor(psp,
3491 						       &sos_hdr_v2_0->psp_fw_bin[fw_index],
3492 						       sos_hdr_v2_0);
3493 			if (err)
3494 				goto out;
3495 		}
3496 		break;
3497 	default:
3498 		dev_err(adev->dev,
3499 			"unsupported psp sos firmware\n");
3500 		err = -EINVAL;
3501 		goto out;
3502 	}
3503 
3504 	return 0;
3505 out:
3506 	amdgpu_ucode_release(&adev->psp.sos_fw);
3507 
3508 	return err;
3509 }
3510 
3511 static int parse_ta_bin_descriptor(struct psp_context *psp,
3512 				   const struct psp_fw_bin_desc *desc,
3513 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3514 {
3515 	uint8_t *ucode_start_addr  = NULL;
3516 
3517 	if (!psp || !desc || !ta_hdr)
3518 		return -EINVAL;
3519 
3520 	ucode_start_addr  = (uint8_t *)ta_hdr +
3521 			    le32_to_cpu(desc->offset_bytes) +
3522 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3523 
3524 	switch (desc->fw_type) {
3525 	case TA_FW_TYPE_PSP_ASD:
3526 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3527 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3528 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3529 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3530 		break;
3531 	case TA_FW_TYPE_PSP_XGMI:
3532 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3533 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3534 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3535 		break;
3536 	case TA_FW_TYPE_PSP_RAS:
3537 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3538 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3539 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3540 		break;
3541 	case TA_FW_TYPE_PSP_HDCP:
3542 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3543 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3544 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3545 		break;
3546 	case TA_FW_TYPE_PSP_DTM:
3547 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3548 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3549 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3550 		break;
3551 	case TA_FW_TYPE_PSP_RAP:
3552 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3553 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3554 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3555 		break;
3556 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3557 		psp->securedisplay_context.context.bin_desc.fw_version =
3558 			le32_to_cpu(desc->fw_version);
3559 		psp->securedisplay_context.context.bin_desc.size_bytes =
3560 			le32_to_cpu(desc->size_bytes);
3561 		psp->securedisplay_context.context.bin_desc.start_addr =
3562 			ucode_start_addr;
3563 		break;
3564 	default:
3565 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3566 		break;
3567 	}
3568 
3569 	return 0;
3570 }
3571 
3572 static int parse_ta_v1_microcode(struct psp_context *psp)
3573 {
3574 	const struct ta_firmware_header_v1_0 *ta_hdr;
3575 	struct amdgpu_device *adev = psp->adev;
3576 
3577 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3578 
3579 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3580 		return -EINVAL;
3581 
3582 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3583 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3584 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3585 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3586 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3587 		(uint8_t *)ta_hdr +
3588 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3589 
3590 	adev->psp.ras_context.context.bin_desc.fw_version =
3591 		le32_to_cpu(ta_hdr->ras.fw_version);
3592 	adev->psp.ras_context.context.bin_desc.size_bytes =
3593 		le32_to_cpu(ta_hdr->ras.size_bytes);
3594 	adev->psp.ras_context.context.bin_desc.start_addr =
3595 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3596 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3597 
3598 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3599 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3600 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3601 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3602 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3603 		(uint8_t *)ta_hdr +
3604 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3605 
3606 	adev->psp.dtm_context.context.bin_desc.fw_version =
3607 		le32_to_cpu(ta_hdr->dtm.fw_version);
3608 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3609 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3610 	adev->psp.dtm_context.context.bin_desc.start_addr =
3611 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3612 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3613 
3614 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3615 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3616 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3617 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3618 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3619 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3620 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3621 
3622 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3623 
3624 	return 0;
3625 }
3626 
3627 static int parse_ta_v2_microcode(struct psp_context *psp)
3628 {
3629 	const struct ta_firmware_header_v2_0 *ta_hdr;
3630 	struct amdgpu_device *adev = psp->adev;
3631 	int err = 0;
3632 	int ta_index = 0;
3633 
3634 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3635 
3636 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3637 		return -EINVAL;
3638 
3639 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3640 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3641 		return -EINVAL;
3642 	}
3643 
3644 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3645 		err = parse_ta_bin_descriptor(psp,
3646 					      &ta_hdr->ta_fw_bin[ta_index],
3647 					      ta_hdr);
3648 		if (err)
3649 			return err;
3650 	}
3651 
3652 	return 0;
3653 }
3654 
3655 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3656 {
3657 	const struct common_firmware_header *hdr;
3658 	struct amdgpu_device *adev = psp->adev;
3659 	int err;
3660 
3661 	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
3662 	if (err)
3663 		return err;
3664 
3665 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3666 	switch (le16_to_cpu(hdr->header_version_major)) {
3667 	case 1:
3668 		err = parse_ta_v1_microcode(psp);
3669 		break;
3670 	case 2:
3671 		err = parse_ta_v2_microcode(psp);
3672 		break;
3673 	default:
3674 		dev_err(adev->dev, "unsupported TA header version\n");
3675 		err = -EINVAL;
3676 	}
3677 
3678 	if (err)
3679 		amdgpu_ucode_release(&adev->psp.ta_fw);
3680 
3681 	return err;
3682 }
3683 
3684 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3685 {
3686 	struct amdgpu_device *adev = psp->adev;
3687 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3688 	struct amdgpu_firmware_info *info = NULL;
3689 	int err = 0;
3690 
3691 	if (!amdgpu_sriov_vf(adev)) {
3692 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3693 		return -EINVAL;
3694 	}
3695 
3696 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
3697 	if (err) {
3698 		if (err == -ENODEV) {
3699 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3700 			err = 0;
3701 			goto out;
3702 		}
3703 		dev_err(adev->dev, "fail to initialize cap microcode\n");
3704 	}
3705 
3706 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3707 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
3708 	info->fw = adev->psp.cap_fw;
3709 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3710 		adev->psp.cap_fw->data;
3711 	adev->firmware.fw_size += ALIGN(
3712 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3713 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3714 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3715 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3716 
3717 	return 0;
3718 
3719 out:
3720 	amdgpu_ucode_release(&adev->psp.cap_fw);
3721 	return err;
3722 }
3723 
3724 static int psp_set_clockgating_state(void *handle,
3725 				     enum amd_clockgating_state state)
3726 {
3727 	return 0;
3728 }
3729 
3730 static int psp_set_powergating_state(void *handle,
3731 				     enum amd_powergating_state state)
3732 {
3733 	return 0;
3734 }
3735 
3736 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3737 					 struct device_attribute *attr,
3738 					 char *buf)
3739 {
3740 	struct drm_device *ddev = dev_get_drvdata(dev);
3741 	struct amdgpu_device *adev = drm_to_adev(ddev);
3742 	uint32_t fw_ver;
3743 	int ret;
3744 
3745 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3746 		dev_info(adev->dev, "PSP block is not ready yet\n.");
3747 		return -EBUSY;
3748 	}
3749 
3750 	mutex_lock(&adev->psp.mutex);
3751 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3752 	mutex_unlock(&adev->psp.mutex);
3753 
3754 	if (ret) {
3755 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3756 		return ret;
3757 	}
3758 
3759 	return sysfs_emit(buf, "%x\n", fw_ver);
3760 }
3761 
3762 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3763 						       struct device_attribute *attr,
3764 						       const char *buf,
3765 						       size_t count)
3766 {
3767 	struct drm_device *ddev = dev_get_drvdata(dev);
3768 	struct amdgpu_device *adev = drm_to_adev(ddev);
3769 	int ret, idx;
3770 	const struct firmware *usbc_pd_fw;
3771 	struct amdgpu_bo *fw_buf_bo = NULL;
3772 	uint64_t fw_pri_mc_addr;
3773 	void *fw_pri_cpu_addr;
3774 
3775 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3776 		dev_err(adev->dev, "PSP block is not ready yet.");
3777 		return -EBUSY;
3778 	}
3779 
3780 	if (!drm_dev_enter(ddev, &idx))
3781 		return -ENODEV;
3782 
3783 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
3784 	if (ret)
3785 		goto fail;
3786 
3787 	/* LFB address which is aligned to 1MB boundary per PSP request */
3788 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3789 				      AMDGPU_GEM_DOMAIN_VRAM |
3790 				      AMDGPU_GEM_DOMAIN_GTT,
3791 				      &fw_buf_bo, &fw_pri_mc_addr,
3792 				      &fw_pri_cpu_addr);
3793 	if (ret)
3794 		goto rel_buf;
3795 
3796 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3797 
3798 	mutex_lock(&adev->psp.mutex);
3799 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3800 	mutex_unlock(&adev->psp.mutex);
3801 
3802 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3803 
3804 rel_buf:
3805 	amdgpu_ucode_release(&usbc_pd_fw);
3806 fail:
3807 	if (ret) {
3808 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
3809 		count = ret;
3810 	}
3811 
3812 	drm_dev_exit(idx);
3813 	return count;
3814 }
3815 
3816 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3817 {
3818 	int idx;
3819 
3820 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3821 		return;
3822 
3823 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3824 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
3825 
3826 	drm_dev_exit(idx);
3827 }
3828 
3829 /**
3830  * DOC: usbc_pd_fw
3831  * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3832  * this file will trigger the update process.
3833  */
3834 static DEVICE_ATTR(usbc_pd_fw, 0644,
3835 		   psp_usbc_pd_fw_sysfs_read,
3836 		   psp_usbc_pd_fw_sysfs_write);
3837 
3838 int is_psp_fw_valid(struct psp_bin_desc bin)
3839 {
3840 	return bin.size_bytes;
3841 }
3842 
3843 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3844 					struct bin_attribute *bin_attr,
3845 					char *buffer, loff_t pos, size_t count)
3846 {
3847 	struct device *dev = kobj_to_dev(kobj);
3848 	struct drm_device *ddev = dev_get_drvdata(dev);
3849 	struct amdgpu_device *adev = drm_to_adev(ddev);
3850 
3851 	adev->psp.vbflash_done = false;
3852 
3853 	/* Safeguard against memory drain */
3854 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
3855 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
3856 		kvfree(adev->psp.vbflash_tmp_buf);
3857 		adev->psp.vbflash_tmp_buf = NULL;
3858 		adev->psp.vbflash_image_size = 0;
3859 		return -ENOMEM;
3860 	}
3861 
3862 	/* TODO Just allocate max for now and optimize to realloc later if needed */
3863 	if (!adev->psp.vbflash_tmp_buf) {
3864 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3865 		if (!adev->psp.vbflash_tmp_buf)
3866 			return -ENOMEM;
3867 	}
3868 
3869 	mutex_lock(&adev->psp.mutex);
3870 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3871 	adev->psp.vbflash_image_size += count;
3872 	mutex_unlock(&adev->psp.mutex);
3873 
3874 	dev_dbg(adev->dev, "IFWI staged for update\n");
3875 
3876 	return count;
3877 }
3878 
3879 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3880 				       struct bin_attribute *bin_attr, char *buffer,
3881 				       loff_t pos, size_t count)
3882 {
3883 	struct device *dev = kobj_to_dev(kobj);
3884 	struct drm_device *ddev = dev_get_drvdata(dev);
3885 	struct amdgpu_device *adev = drm_to_adev(ddev);
3886 	struct amdgpu_bo *fw_buf_bo = NULL;
3887 	uint64_t fw_pri_mc_addr;
3888 	void *fw_pri_cpu_addr;
3889 	int ret;
3890 
3891 	if (adev->psp.vbflash_image_size == 0)
3892 		return -EINVAL;
3893 
3894 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
3895 
3896 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3897 					AMDGPU_GPU_PAGE_SIZE,
3898 					AMDGPU_GEM_DOMAIN_VRAM,
3899 					&fw_buf_bo,
3900 					&fw_pri_mc_addr,
3901 					&fw_pri_cpu_addr);
3902 	if (ret)
3903 		goto rel_buf;
3904 
3905 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3906 
3907 	mutex_lock(&adev->psp.mutex);
3908 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3909 	mutex_unlock(&adev->psp.mutex);
3910 
3911 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3912 
3913 rel_buf:
3914 	kvfree(adev->psp.vbflash_tmp_buf);
3915 	adev->psp.vbflash_tmp_buf = NULL;
3916 	adev->psp.vbflash_image_size = 0;
3917 
3918 	if (ret) {
3919 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
3920 		return ret;
3921 	}
3922 
3923 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
3924 	return 0;
3925 }
3926 
3927 /**
3928  * DOC: psp_vbflash
3929  * Writing to this file will stage an IFWI for update. Reading from this file
3930  * will trigger the update process.
3931  */
3932 static struct bin_attribute psp_vbflash_bin_attr = {
3933 	.attr = {.name = "psp_vbflash", .mode = 0660},
3934 	.size = 0,
3935 	.write = amdgpu_psp_vbflash_write,
3936 	.read = amdgpu_psp_vbflash_read,
3937 };
3938 
3939 /**
3940  * DOC: psp_vbflash_status
3941  * The status of the flash process.
3942  * 0: IFWI flash not complete.
3943  * 1: IFWI flash complete.
3944  */
3945 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3946 					 struct device_attribute *attr,
3947 					 char *buf)
3948 {
3949 	struct drm_device *ddev = dev_get_drvdata(dev);
3950 	struct amdgpu_device *adev = drm_to_adev(ddev);
3951 	uint32_t vbflash_status;
3952 
3953 	vbflash_status = psp_vbflash_status(&adev->psp);
3954 	if (!adev->psp.vbflash_done)
3955 		vbflash_status = 0;
3956 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
3957 		vbflash_status = 1;
3958 
3959 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
3960 }
3961 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
3962 
3963 static struct bin_attribute *bin_flash_attrs[] = {
3964 	&psp_vbflash_bin_attr,
3965 	NULL
3966 };
3967 
3968 static struct attribute *flash_attrs[] = {
3969 	&dev_attr_psp_vbflash_status.attr,
3970 	&dev_attr_usbc_pd_fw.attr,
3971 	NULL
3972 };
3973 
3974 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3975 {
3976 	struct device *dev = kobj_to_dev(kobj);
3977 	struct drm_device *ddev = dev_get_drvdata(dev);
3978 	struct amdgpu_device *adev = drm_to_adev(ddev);
3979 
3980 	if (attr == &dev_attr_usbc_pd_fw.attr)
3981 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
3982 
3983 	return adev->psp.sup_ifwi_up ? 0440 : 0;
3984 }
3985 
3986 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
3987 						struct bin_attribute *attr,
3988 						int idx)
3989 {
3990 	struct device *dev = kobj_to_dev(kobj);
3991 	struct drm_device *ddev = dev_get_drvdata(dev);
3992 	struct amdgpu_device *adev = drm_to_adev(ddev);
3993 
3994 	return adev->psp.sup_ifwi_up ? 0660 : 0;
3995 }
3996 
3997 const struct attribute_group amdgpu_flash_attr_group = {
3998 	.attrs = flash_attrs,
3999 	.bin_attrs = bin_flash_attrs,
4000 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4001 	.is_visible = amdgpu_flash_attr_is_visible,
4002 };
4003 
4004 const struct amd_ip_funcs psp_ip_funcs = {
4005 	.name = "psp",
4006 	.early_init = psp_early_init,
4007 	.late_init = NULL,
4008 	.sw_init = psp_sw_init,
4009 	.sw_fini = psp_sw_fini,
4010 	.hw_init = psp_hw_init,
4011 	.hw_fini = psp_hw_fini,
4012 	.suspend = psp_suspend,
4013 	.resume = psp_resume,
4014 	.is_idle = NULL,
4015 	.check_soft_reset = NULL,
4016 	.wait_for_idle = NULL,
4017 	.soft_reset = NULL,
4018 	.set_clockgating_state = psp_set_clockgating_state,
4019 	.set_powergating_state = psp_set_powergating_state,
4020 };
4021 
4022 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4023 	.type = AMD_IP_BLOCK_TYPE_PSP,
4024 	.major = 3,
4025 	.minor = 1,
4026 	.rev = 0,
4027 	.funcs = &psp_ip_funcs,
4028 };
4029 
4030 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4031 	.type = AMD_IP_BLOCK_TYPE_PSP,
4032 	.major = 10,
4033 	.minor = 0,
4034 	.rev = 0,
4035 	.funcs = &psp_ip_funcs,
4036 };
4037 
4038 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4039 	.type = AMD_IP_BLOCK_TYPE_PSP,
4040 	.major = 11,
4041 	.minor = 0,
4042 	.rev = 0,
4043 	.funcs = &psp_ip_funcs,
4044 };
4045 
4046 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4047 	.type = AMD_IP_BLOCK_TYPE_PSP,
4048 	.major = 11,
4049 	.minor = 0,
4050 	.rev = 8,
4051 	.funcs = &psp_ip_funcs,
4052 };
4053 
4054 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4055 	.type = AMD_IP_BLOCK_TYPE_PSP,
4056 	.major = 12,
4057 	.minor = 0,
4058 	.rev = 0,
4059 	.funcs = &psp_ip_funcs,
4060 };
4061 
4062 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4063 	.type = AMD_IP_BLOCK_TYPE_PSP,
4064 	.major = 13,
4065 	.minor = 0,
4066 	.rev = 0,
4067 	.funcs = &psp_ip_funcs,
4068 };
4069 
4070 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4071 	.type = AMD_IP_BLOCK_TYPE_PSP,
4072 	.major = 13,
4073 	.minor = 0,
4074 	.rev = 4,
4075 	.funcs = &psp_ip_funcs,
4076 };
4077 
4078 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4079 	.type = AMD_IP_BLOCK_TYPE_PSP,
4080 	.major = 14,
4081 	.minor = 0,
4082 	.rev = 0,
4083 	.funcs = &psp_ip_funcs,
4084 };
4085