xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision 4b660dbd9ee2059850fd30e0df420ca7a38a1856)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46 
47 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*3)
48 
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52 
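/* Create the PSP kernel-mode (KM) ring: a 4KB buffer placed in VRAM (or GTT)
 * that carries PSP GFX command frames between the driver and the PSP.
 */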
53 static int psp_ring_init(struct psp_context *psp,
54 			 enum psp_ring_type ring_type)
55 {
56 	int ret = 0;
57 	struct psp_ring *ring;
58 	struct amdgpu_device *adev = psp->adev;
59 
60 	ring = &psp->km_ring;
61 
62 	ring->ring_type = ring_type;
63 
64 	/* allocate 4k Page of Local Frame Buffer memory for ring */
65 	ring->ring_size = 0x1000;
66 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 				      AMDGPU_GEM_DOMAIN_VRAM |
68 				      AMDGPU_GEM_DOMAIN_GTT,
69 				      &adev->firmware.rbuf,
70 				      &ring->ring_mem_mc_addr,
71 				      (void **)&ring->ring_mem);
72 	if (ret) {
73 		ring->ring_size = 0;
74 		return ret;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * Since DF Cstate management is centralized in PMFW, the firmware
82  * loading sequence is updated as below:
83  *   - Load KDB
84  *   - Load SYS_DRV
85  *   - Load tOS
86  *   - Load PMFW
87  *   - Setup TMR
88  *   - Load other non-psp fw
89  *   - Load ASD
90  *   - Load XGMI/RAS/HDCP/DTM TA if any
91  *
92  * This new sequence is required for
93  *   - Arcturus and onwards
94  */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 	struct amdgpu_device *adev = psp->adev;
98 
99 	if (amdgpu_sriov_vf(adev)) {
100 		psp->pmfw_centralized_cstate_management = false;
101 		return;
102 	}
103 
104 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 	case IP_VERSION(11, 0, 0):
106 	case IP_VERSION(11, 0, 4):
107 	case IP_VERSION(11, 0, 5):
108 	case IP_VERSION(11, 0, 7):
109 	case IP_VERSION(11, 0, 9):
110 	case IP_VERSION(11, 0, 11):
111 	case IP_VERSION(11, 0, 12):
112 	case IP_VERSION(11, 0, 13):
113 	case IP_VERSION(13, 0, 0):
114 	case IP_VERSION(13, 0, 2):
115 	case IP_VERSION(13, 0, 7):
116 		psp->pmfw_centralized_cstate_management = true;
117 		break;
118 	default:
119 		psp->pmfw_centralized_cstate_management = false;
120 		break;
121 	}
122 }
123 
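/* For SR-IOV VFs, select the autoload ucode ID and request the CAP (and, for
 * some IP versions, TA) microcode appropriate for the MP0 (PSP) IP version.
 */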
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 	struct amdgpu_device *adev = psp->adev;
127 	char ucode_prefix[30];
128 	int ret = 0;
129 
130 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131 
132 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 	case IP_VERSION(9, 0, 0):
134 	case IP_VERSION(11, 0, 7):
135 	case IP_VERSION(11, 0, 9):
136 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 		break;
139 	case IP_VERSION(13, 0, 2):
140 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 		ret = psp_init_cap_microcode(psp, ucode_prefix);
142 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 		break;
144 	case IP_VERSION(13, 0, 0):
145 		adev->virt.autoload_ucode_id = 0;
146 		break;
147 	case IP_VERSION(13, 0, 6):
148 		ret = psp_init_cap_microcode(psp, ucode_prefix);
149 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
150 		break;
151 	case IP_VERSION(13, 0, 10):
152 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
153 		ret = psp_init_cap_microcode(psp, ucode_prefix);
154 		break;
155 	default:
156 		return -EINVAL;
157 	}
158 	return ret;
159 }
160 
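/* Early init: hook up the ASIC-specific PSP callbacks based on the MP0 IP
 * version, record whether firmware autoload and boot-time TMR are supported,
 * then request the PSP microcode (host path or SR-IOV path).
 */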
161 static int psp_early_init(void *handle)
162 {
163 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
164 	struct psp_context *psp = &adev->psp;
165 
166 	psp->autoload_supported = true;
167 	psp->boot_time_tmr = true;
168 
169 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
170 	case IP_VERSION(9, 0, 0):
171 		psp_v3_1_set_psp_funcs(psp);
172 		psp->autoload_supported = false;
173 		psp->boot_time_tmr = false;
174 		break;
175 	case IP_VERSION(10, 0, 0):
176 	case IP_VERSION(10, 0, 1):
177 		psp_v10_0_set_psp_funcs(psp);
178 		psp->autoload_supported = false;
179 		psp->boot_time_tmr = false;
180 		break;
181 	case IP_VERSION(11, 0, 2):
182 	case IP_VERSION(11, 0, 4):
183 		psp_v11_0_set_psp_funcs(psp);
184 		psp->autoload_supported = false;
185 		psp->boot_time_tmr = false;
186 		break;
187 	case IP_VERSION(11, 0, 0):
188 	case IP_VERSION(11, 0, 7):
189 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
190 		fallthrough;
191 	case IP_VERSION(11, 0, 5):
192 	case IP_VERSION(11, 0, 9):
193 	case IP_VERSION(11, 0, 11):
194 	case IP_VERSION(11, 5, 0):
195 	case IP_VERSION(11, 0, 12):
196 	case IP_VERSION(11, 0, 13):
197 		psp_v11_0_set_psp_funcs(psp);
198 		psp->boot_time_tmr = false;
199 		break;
200 	case IP_VERSION(11, 0, 3):
201 	case IP_VERSION(12, 0, 1):
202 		psp_v12_0_set_psp_funcs(psp);
203 		psp->autoload_supported = false;
204 		psp->boot_time_tmr = false;
205 		break;
206 	case IP_VERSION(13, 0, 2):
207 		psp->boot_time_tmr = false;
208 		fallthrough;
209 	case IP_VERSION(13, 0, 6):
210 		psp_v13_0_set_psp_funcs(psp);
211 		psp->autoload_supported = false;
212 		break;
213 	case IP_VERSION(13, 0, 1):
214 	case IP_VERSION(13, 0, 3):
215 	case IP_VERSION(13, 0, 5):
216 	case IP_VERSION(13, 0, 8):
217 	case IP_VERSION(13, 0, 11):
218 	case IP_VERSION(14, 0, 0):
219 	case IP_VERSION(14, 0, 1):
220 		psp_v13_0_set_psp_funcs(psp);
221 		psp->boot_time_tmr = false;
222 		break;
223 	case IP_VERSION(11, 0, 8):
224 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
225 			psp_v11_0_8_set_psp_funcs(psp);
227 		psp->autoload_supported = false;
228 		psp->boot_time_tmr = false;
229 		break;
230 	case IP_VERSION(13, 0, 0):
231 	case IP_VERSION(13, 0, 7):
232 	case IP_VERSION(13, 0, 10):
233 		psp_v13_0_set_psp_funcs(psp);
234 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
235 		psp->boot_time_tmr = false;
236 		break;
237 	case IP_VERSION(13, 0, 4):
238 		psp_v13_0_4_set_psp_funcs(psp);
239 		psp->boot_time_tmr = false;
240 		break;
241 	case IP_VERSION(14, 0, 2):
242 	case IP_VERSION(14, 0, 3):
243 		psp_v14_0_set_psp_funcs(psp);
244 		break;
245 	default:
246 		return -EINVAL;
247 	}
248 
249 	psp->adev = adev;
250 
251 	adev->psp_timeout = 20000;
252 
253 	psp_check_pmfw_centralized_cstate_management(psp);
254 
255 	if (amdgpu_sriov_vf(adev))
256 		return psp_init_sriov_microcode(psp);
257 	else
258 		return psp_init_microcode(psp);
259 }
260 
261 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
262 {
263 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
264 			      &mem_ctx->shared_buf);
265 	mem_ctx->shared_bo = NULL;
266 }
267 
268 static void psp_free_shared_bufs(struct psp_context *psp)
269 {
270 	void *tmr_buf;
271 	void **pptr;
272 
273 	/* free TMR memory buffer */
274 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
275 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
276 	psp->tmr_bo = NULL;
277 
278 	/* free xgmi shared memory */
279 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
280 
281 	/* free ras shared memory */
282 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
283 
284 	/* free hdcp shared memory */
285 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
286 
287 	/* free dtm shared memory */
288 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
289 
290 	/* free rap shared memory */
291 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
292 
293 	/* free securedisplay shared memory */
294 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
295 
296 
298 
299 static void psp_memory_training_fini(struct psp_context *psp)
300 {
301 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
302 
303 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
304 	kfree(ctx->sys_cache);
305 	ctx->sys_cache = NULL;
306 }
307 
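/* Allocate the system-memory buffer (sys_cache) used to hold the DRAM
 * training data; only done when the training region in VRAM was reserved
 * successfully earlier.
 */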
308 static int psp_memory_training_init(struct psp_context *psp)
309 {
310 	int ret;
311 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
312 
313 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
314 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
315 		return 0;
316 	}
317 
318 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
319 	if (ctx->sys_cache == NULL) {
320 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
321 		ret = -ENOMEM;
322 		goto Err_out;
323 	}
324 
325 	dev_dbg(psp->adev->dev,
326 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
327 		ctx->train_data_size,
328 		ctx->p2c_train_data_offset,
329 		ctx->c2p_train_data_offset);
330 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
331 	return 0;
332 
333 Err_out:
334 	psp_memory_training_fini(psp);
335 	return ret;
336 }
337 
338 /*
339  * Helper function to query a psp runtime database entry
340  *
341  * @adev: amdgpu_device pointer
342  * @entry_type: the type of psp runtime database entry
343  * @db_entry: runtime database entry pointer
344  *
345  * Return false if the runtime database doesn't exist or the entry is invalid,
346  * or true if the specific database entry is found and copied to @db_entry
347  */
348 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
349 				     enum psp_runtime_entry_type entry_type,
350 				     void *db_entry)
351 {
352 	uint64_t db_header_pos, db_dir_pos;
353 	struct psp_runtime_data_header db_header = {0};
354 	struct psp_runtime_data_directory db_dir = {0};
355 	bool ret = false;
356 	int i;
357 
358 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
359 		return false;
360 
361 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
362 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
363 
364 	/* read runtime db header from vram */
365 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
366 			sizeof(struct psp_runtime_data_header), false);
367 
368 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
369 		/* runtime db doesn't exist, exit */
370 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
371 		return false;
372 	}
373 
374 	/* read runtime database entry from vram */
375 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
376 			sizeof(struct psp_runtime_data_directory), false);
377 
378 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
379 		/* invalid db entry count, exit */
380 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
381 		return false;
382 	}
383 
384 	/* look up for requested entry type */
385 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
386 		if (db_dir.entry_list[i].entry_type == entry_type) {
387 			switch (entry_type) {
388 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
389 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
390 					/* invalid db entry size */
391 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
392 					return false;
393 				}
394 				/* read runtime database entry */
395 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
396 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
397 				ret = true;
398 				break;
399 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
400 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
401 					/* invalid db entry size */
402 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
403 					return false;
404 				}
405 				/* read runtime database entry */
406 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
407 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
408 				ret = true;
409 				break;
410 			default:
411 				ret = false;
412 				break;
413 			}
414 		}
415 	}
416 
417 	return ret;
418 }
419 
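/* sw_init: allocate the shared GFX command structure, read the SCPM and boot
 * config entries from the PSP runtime database, run two-stage memory training
 * when enabled, and create the fw_pri/fence/cmd buffer objects used for
 * command submission.
 */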
420 static int psp_sw_init(void *handle)
421 {
422 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
423 	struct psp_context *psp = &adev->psp;
424 	int ret;
425 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
426 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
427 	struct psp_runtime_scpm_entry scpm_entry;
428 
429 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
430 	if (!psp->cmd) {
431 		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
432 		return -ENOMEM;
433 	}
434 
435 	adev->psp.xgmi_context.supports_extended_data =
436 		!adev->gmc.xgmi.connected_to_cpu &&
437 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
438 
439 	memset(&scpm_entry, 0, sizeof(scpm_entry));
440 	if ((psp_get_runtime_db_entry(adev,
441 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
442 				&scpm_entry)) &&
443 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
444 		adev->scpm_enabled = true;
445 		adev->scpm_status = scpm_entry.scpm_status;
446 	} else {
447 		adev->scpm_enabled = false;
448 		adev->scpm_status = SCPM_DISABLE;
449 	}
450 
451 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
452 
453 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
454 	if (psp_get_runtime_db_entry(adev,
455 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
456 				&boot_cfg_entry)) {
457 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
458 		if ((psp->boot_cfg_bitmask) &
459 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
460 			/* If psp runtime database exists, then
461 			 * only enable two stage memory training
462 			 * when TWO_STAGE_DRAM_TRAINING bit is set
463 			 * in runtime database
464 			 */
465 			mem_training_ctx->enable_mem_training = true;
466 		}
467 
468 	} else {
469 		/* If psp runtime database doesn't exist or is
470 		 * invalid, force enable two stage memory training
471 		 */
472 		mem_training_ctx->enable_mem_training = true;
473 	}
474 
475 	if (mem_training_ctx->enable_mem_training) {
476 		ret = psp_memory_training_init(psp);
477 		if (ret) {
478 			dev_err(adev->dev, "Failed to initialize memory training!\n");
479 			return ret;
480 		}
481 
482 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
483 		if (ret) {
484 			dev_err(adev->dev, "Failed to process memory training!\n");
485 			return ret;
486 		}
487 	}
488 
489 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
490 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
491 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
492 				      &psp->fw_pri_bo,
493 				      &psp->fw_pri_mc_addr,
494 				      &psp->fw_pri_buf);
495 	if (ret)
496 		return ret;
497 
498 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
499 				      AMDGPU_GEM_DOMAIN_VRAM |
500 				      AMDGPU_GEM_DOMAIN_GTT,
501 				      &psp->fence_buf_bo,
502 				      &psp->fence_buf_mc_addr,
503 				      &psp->fence_buf);
504 	if (ret)
505 		goto failed1;
506 
507 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
508 				      AMDGPU_GEM_DOMAIN_VRAM |
509 				      AMDGPU_GEM_DOMAIN_GTT,
510 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
511 				      (void **)&psp->cmd_buf_mem);
512 	if (ret)
513 		goto failed2;
514 
515 	return 0;
516 
517 failed2:
518 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
519 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
520 failed1:
521 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
522 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
523 	return ret;
524 }
525 
526 static int psp_sw_fini(void *handle)
527 {
528 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
529 	struct psp_context *psp = &adev->psp;
530 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
531 
532 	psp_memory_training_fini(psp);
533 
534 	amdgpu_ucode_release(&psp->sos_fw);
535 	amdgpu_ucode_release(&psp->asd_fw);
536 	amdgpu_ucode_release(&psp->ta_fw);
537 	amdgpu_ucode_release(&psp->cap_fw);
538 	amdgpu_ucode_release(&psp->toc_fw);
539 
540 	kfree(cmd);
541 	cmd = NULL;
542 
543 	psp_free_shared_bufs(psp);
544 
545 	if (psp->km_ring.ring_mem)
546 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
547 				      &psp->km_ring.ring_mem_mc_addr,
548 				      (void **)&psp->km_ring.ring_mem);
549 
550 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
551 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
552 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
553 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
554 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
555 			      (void **)&psp->cmd_buf_mem);
556 
557 	return 0;
558 }
559 
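/* Poll a register until (value & mask) == reg_val, or, when check_changed is
 * set, until the value differs from reg_val; gives up after
 * adev->usec_timeout microseconds and returns -ETIME.
 */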
560 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
561 		 uint32_t reg_val, uint32_t mask, bool check_changed)
562 {
563 	uint32_t val;
564 	int i;
565 	struct amdgpu_device *adev = psp->adev;
566 
567 	if (psp->adev->no_hw_access)
568 		return 0;
569 
570 	for (i = 0; i < adev->usec_timeout; i++) {
571 		val = RREG32(reg_index);
572 		if (check_changed) {
573 			if (val != reg_val)
574 				return 0;
575 		} else {
576 			if ((val & mask) == reg_val)
577 				return 0;
578 		}
579 		udelay(1);
580 	}
581 
582 	return -ETIME;
583 }
584 
585 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
586 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
587 {
588 	uint32_t val;
589 	int i;
590 	struct amdgpu_device *adev = psp->adev;
591 
592 	if (psp->adev->no_hw_access)
593 		return 0;
594 
595 	for (i = 0; i < msec_timeout; i++) {
596 		val = RREG32(reg_index);
597 		if ((val & mask) == reg_val)
598 			return 0;
599 		msleep(1);
600 	}
601 
602 	return -ETIME;
603 }
604 
605 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
606 {
607 	switch (cmd_id) {
608 	case GFX_CMD_ID_LOAD_TA:
609 		return "LOAD_TA";
610 	case GFX_CMD_ID_UNLOAD_TA:
611 		return "UNLOAD_TA";
612 	case GFX_CMD_ID_INVOKE_CMD:
613 		return "INVOKE_CMD";
614 	case GFX_CMD_ID_LOAD_ASD:
615 		return "LOAD_ASD";
616 	case GFX_CMD_ID_SETUP_TMR:
617 		return "SETUP_TMR";
618 	case GFX_CMD_ID_LOAD_IP_FW:
619 		return "LOAD_IP_FW";
620 	case GFX_CMD_ID_DESTROY_TMR:
621 		return "DESTROY_TMR";
622 	case GFX_CMD_ID_SAVE_RESTORE:
623 		return "SAVE_RESTORE_IP_FW";
624 	case GFX_CMD_ID_SETUP_VMR:
625 		return "SETUP_VMR";
626 	case GFX_CMD_ID_DESTROY_VMR:
627 		return "DESTROY_VMR";
628 	case GFX_CMD_ID_PROG_REG:
629 		return "PROG_REG";
630 	case GFX_CMD_ID_GET_FW_ATTESTATION:
631 		return "GET_FW_ATTESTATION";
632 	case GFX_CMD_ID_LOAD_TOC:
633 		return "ID_LOAD_TOC";
634 	case GFX_CMD_ID_AUTOLOAD_RLC:
635 		return "AUTOLOAD_RLC";
636 	case GFX_CMD_ID_BOOT_CFG:
637 		return "BOOT_CFG";
638 	default:
639 		return "UNKNOWN CMD";
640 	}
641 }
642 
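/* Copy a GFX command into the ring-visible command buffer, submit it on the
 * KM ring, and poll the fence buffer for completion.  TEE_ERROR_NOT_SUPPORTED
 * and PSP_ERR_UNKNOWN_COMMAND responses are tolerated under SR-IOV; other
 * non-zero statuses are warned about (and fail the call for SR-IOV ucode
 * loads or on timeout).
 */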
643 static int
644 psp_cmd_submit_buf(struct psp_context *psp,
645 		   struct amdgpu_firmware_info *ucode,
646 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
647 {
648 	int ret;
649 	int index;
650 	int timeout = psp->adev->psp_timeout;
651 	bool ras_intr = false;
652 	bool skip_unsupport = false;
653 
654 	if (psp->adev->no_hw_access)
655 		return 0;
656 
657 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
658 
659 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
660 
661 	index = atomic_inc_return(&psp->fence_value);
662 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
663 	if (ret) {
664 		atomic_dec(&psp->fence_value);
665 		goto exit;
666 	}
667 
668 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
669 	while (*((unsigned int *)psp->fence_buf) != index) {
670 		if (--timeout == 0)
671 			break;
672 		/*
673 		 * Don't wait for the timeout when err_event_athub occurs:
674 		 * the gpu reset thread has been triggered and the locked
675 		 * resources must be released for the psp resume sequence.
676 		 */
677 		ras_intr = amdgpu_ras_intr_triggered();
678 		if (ras_intr)
679 			break;
680 		usleep_range(10, 100);
681 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
682 	}
683 
684 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
685 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
686 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
687 
688 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
689 
690 	/* In some cases the psp response status is non-zero even though the
691 	 * command was submitted without problem; some versions of PSP FW
692 	 * simply do not write 0 to that field.
693 	 * So only print a warning instead of an error here, to avoid
694 	 * breaking hw_init during psp initialization, and do not
695 	 * return -EINVAL.
696 	 */
697 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
698 		if (ucode)
699 			dev_warn(psp->adev->dev,
700 				 "failed to load ucode %s(0x%X) ",
701 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
702 		dev_warn(psp->adev->dev,
703 			 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
704 			 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
705 			 psp->cmd_buf_mem->resp.status);
706 		/* If any firmware (including CAP) load fails under SRIOV, it should
707 		 * return failure to stop the VF from initializing.
708 		 * Also return failure in case of timeout
709 		 */
710 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
711 			ret = -EINVAL;
712 			goto exit;
713 		}
714 	}
715 
716 	if (ucode) {
717 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
718 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
719 	}
720 
721 exit:
722 	return ret;
723 }
724 
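/* acquire_psp_cmd_buf()/release_psp_cmd_buf() serialize access to the single
 * psp->cmd scratch command structure via psp->mutex; every acquire must be
 * paired with a release once the command has been submitted.
 */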
725 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
726 {
727 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
728 
729 	mutex_lock(&psp->mutex);
730 
731 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
732 
733 	return cmd;
734 }
735 
736 static void release_psp_cmd_buf(struct psp_context *psp)
737 {
738 	mutex_unlock(&psp->mutex);
739 }
740 
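/* Build the SETUP_TMR command (SETUP_VMR under SR-IOV) with both the MC
 * address and the system physical address of the TMR buffer object.
 */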
741 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
742 				 struct psp_gfx_cmd_resp *cmd,
743 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
744 {
745 	struct amdgpu_device *adev = psp->adev;
746 	uint32_t size = 0;
747 	uint64_t tmr_pa = 0;
748 
749 	if (tmr_bo) {
750 		size = amdgpu_bo_size(tmr_bo);
751 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
752 	}
753 
754 	if (amdgpu_sriov_vf(psp->adev))
755 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
756 	else
757 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
758 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
759 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
760 	cmd->cmd.cmd_setup_tmr.buf_size = size;
761 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
762 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
763 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
764 }
765 
766 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
767 				      uint64_t pri_buf_mc, uint32_t size)
768 {
769 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
770 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
771 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
772 	cmd->cmd.cmd_load_toc.toc_size = size;
773 }
774 
775 /* Issue the LOAD TOC cmd to PSP so it parses the TOC and calculates the TMR size needed */
776 static int psp_load_toc(struct psp_context *psp,
777 			uint32_t *tmr_size)
778 {
779 	int ret;
780 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
781 
782 	/* Copy toc to psp firmware private buffer */
783 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
784 
785 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
786 
787 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
788 				 psp->fence_buf_mc_addr);
789 	if (!ret)
790 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
791 
792 	release_psp_cmd_buf(psp);
793 
794 	return ret;
795 }
796 
797 /* Set up Trusted Memory Region */
798 static int psp_tmr_init(struct psp_context *psp)
799 {
800 	int ret = 0;
801 	int tmr_size;
802 	void *tmr_buf;
803 	void **pptr;
804 
805 	/*
806 	 * Per HW engineering, the TMR address should be "naturally
807 	 * aligned", i.e. the start address should be an integer multiple
808 	 * of the TMR size.
809 	 *
810 	 * Note: this memory needs to stay reserved until the driver is unloaded.
811 	 */
812 	tmr_size = PSP_TMR_SIZE(psp->adev);
813 
814 	/* For ASICs that support RLC autoload, PSP will parse the TOC
815 	 * and calculate the total TMR size needed
816 	 */
817 	if (!amdgpu_sriov_vf(psp->adev) &&
818 	    psp->toc.start_addr &&
819 	    psp->toc.size_bytes &&
820 	    psp->fw_pri_buf) {
821 		ret = psp_load_toc(psp, &tmr_size);
822 		if (ret) {
823 			dev_err(psp->adev->dev, "Failed to load toc\n");
824 			return ret;
825 		}
826 	}
827 
828 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
829 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
830 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
831 					      PSP_TMR_ALIGNMENT,
832 					      AMDGPU_HAS_VRAM(psp->adev) ?
833 					      AMDGPU_GEM_DOMAIN_VRAM :
834 					      AMDGPU_GEM_DOMAIN_GTT,
835 					      &psp->tmr_bo, &psp->tmr_mc_addr,
836 					      pptr);
837 	}
838 
839 	return ret;
840 }
841 
842 static bool psp_skip_tmr(struct psp_context *psp)
843 {
844 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
845 	case IP_VERSION(11, 0, 9):
846 	case IP_VERSION(11, 0, 7):
847 	case IP_VERSION(13, 0, 2):
848 	case IP_VERSION(13, 0, 6):
849 	case IP_VERSION(13, 0, 10):
850 		return true;
851 	default:
852 		return false;
853 	}
854 }
855 
856 static int psp_tmr_load(struct psp_context *psp)
857 {
858 	int ret;
859 	struct psp_gfx_cmd_resp *cmd;
860 
861 	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
862 	 * Already set up by host driver.
863 	 */
864 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
865 		return 0;
866 
867 	cmd = acquire_psp_cmd_buf(psp);
868 
869 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
870 	if (psp->tmr_bo)
871 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
872 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
873 
874 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
875 				 psp->fence_buf_mc_addr);
876 
877 	release_psp_cmd_buf(psp);
878 
879 	return ret;
880 }
881 
882 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
883 					struct psp_gfx_cmd_resp *cmd)
884 {
885 	if (amdgpu_sriov_vf(psp->adev))
886 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
887 	else
888 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
889 }
890 
891 static int psp_tmr_unload(struct psp_context *psp)
892 {
893 	int ret;
894 	struct psp_gfx_cmd_resp *cmd;
895 
896 	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
897 	 * as TMR is not loaded at all
898 	 */
899 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
900 		return 0;
901 
902 	cmd = acquire_psp_cmd_buf(psp);
903 
904 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
905 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
906 
907 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
908 				 psp->fence_buf_mc_addr);
909 
910 	release_psp_cmd_buf(psp);
911 
912 	return ret;
913 }
914 
915 static int psp_tmr_terminate(struct psp_context *psp)
916 {
917 	return psp_tmr_unload(psp);
918 }
919 
920 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
921 					uint64_t *output_ptr)
922 {
923 	int ret;
924 	struct psp_gfx_cmd_resp *cmd;
925 
926 	if (!output_ptr)
927 		return -EINVAL;
928 
929 	if (amdgpu_sriov_vf(psp->adev))
930 		return 0;
931 
932 	cmd = acquire_psp_cmd_buf(psp);
933 
934 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
935 
936 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
937 				 psp->fence_buf_mc_addr);
938 
939 	if (!ret) {
940 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
941 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
942 	}
943 
944 	release_psp_cmd_buf(psp);
945 
946 	return ret;
947 }
948 
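/* Read the boot config word from the PSP and report whether the GECC feature
 * bit is set (1) or clear (0).
 */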
949 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
950 {
951 	struct psp_context *psp = &adev->psp;
952 	struct psp_gfx_cmd_resp *cmd;
953 	int ret;
954 
955 	if (amdgpu_sriov_vf(adev))
956 		return 0;
957 
958 	cmd = acquire_psp_cmd_buf(psp);
959 
960 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
961 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
962 
963 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
964 	if (!ret) {
965 		*boot_cfg =
966 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
967 	}
968 
969 	release_psp_cmd_buf(psp);
970 
971 	return ret;
972 }
973 
974 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
975 {
976 	int ret;
977 	struct psp_context *psp = &adev->psp;
978 	struct psp_gfx_cmd_resp *cmd;
979 
980 	if (amdgpu_sriov_vf(adev))
981 		return 0;
982 
983 	cmd = acquire_psp_cmd_buf(psp);
984 
985 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
986 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
987 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
988 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
989 
990 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
991 
992 	release_psp_cmd_buf(psp);
993 
994 	return ret;
995 }
996 
997 static int psp_rl_load(struct amdgpu_device *adev)
998 {
999 	int ret;
1000 	struct psp_context *psp = &adev->psp;
1001 	struct psp_gfx_cmd_resp *cmd;
1002 
1003 	if (!is_psp_fw_valid(psp->rl))
1004 		return 0;
1005 
1006 	cmd = acquire_psp_cmd_buf(psp);
1007 
1008 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1009 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1010 
1011 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1012 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1013 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1014 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1015 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1016 
1017 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1018 
1019 	release_psp_cmd_buf(psp);
1020 
1021 	return ret;
1022 }
1023 
1024 int psp_spatial_partition(struct psp_context *psp, int mode)
1025 {
1026 	struct psp_gfx_cmd_resp *cmd;
1027 	int ret;
1028 
1029 	if (amdgpu_sriov_vf(psp->adev))
1030 		return 0;
1031 
1032 	cmd = acquire_psp_cmd_buf(psp);
1033 
1034 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1035 	cmd->cmd.cmd_spatial_part.mode = mode;
1036 
1037 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1038 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1039 
1040 	release_psp_cmd_buf(psp);
1041 
1042 	return ret;
1043 }
1044 
1045 static int psp_asd_initialize(struct psp_context *psp)
1046 {
1047 	int ret;
1048 
1049 	/* If the PSP version doesn't match the ASD version, ASD loading fails;
1050 	 * add a workaround to bypass it for SRIOV for now.
1051 	 * TODO: add a version check to make this common
1052 	 */
1053 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1054 		return 0;
1055 
1056 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1057 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1058 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1059 
1060 	ret = psp_ta_load(psp, &psp->asd_context);
1061 	if (!ret)
1062 		psp->asd_context.initialized = true;
1063 
1064 	return ret;
1065 }
1066 
1067 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1068 				       uint32_t session_id)
1069 {
1070 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1071 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1072 }
1073 
1074 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1075 {
1076 	int ret;
1077 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1078 
1079 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1080 
1081 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1082 
1083 	context->resp_status = cmd->resp.status;
1084 
1085 	release_psp_cmd_buf(psp);
1086 
1087 	return ret;
1088 }
1089 
1090 static int psp_asd_terminate(struct psp_context *psp)
1091 {
1092 	int ret;
1093 
1094 	if (amdgpu_sriov_vf(psp->adev))
1095 		return 0;
1096 
1097 	if (!psp->asd_context.initialized)
1098 		return 0;
1099 
1100 	ret = psp_ta_unload(psp, &psp->asd_context);
1101 	if (!ret)
1102 		psp->asd_context.initialized = false;
1103 
1104 	return ret;
1105 }
1106 
1107 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1108 		uint32_t id, uint32_t value)
1109 {
1110 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1111 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1112 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1113 }
1114 
1115 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1116 		uint32_t value)
1117 {
1118 	struct psp_gfx_cmd_resp *cmd;
1119 	int ret = 0;
1120 
1121 	if (reg >= PSP_REG_LAST)
1122 		return -EINVAL;
1123 
1124 	cmd = acquire_psp_cmd_buf(psp);
1125 
1126 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1127 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1128 	if (ret)
1129 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1130 
1131 	release_psp_cmd_buf(psp);
1132 
1133 	return ret;
1134 }
1135 
1136 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1137 				     uint64_t ta_bin_mc,
1138 				     struct ta_context *context)
1139 {
1140 	cmd->cmd_id				= context->ta_load_type;
1141 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1142 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1143 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1144 
1145 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1146 		lower_32_bits(context->mem_context.shared_mc_addr);
1147 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1148 		upper_32_bits(context->mem_context.shared_mc_addr);
1149 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1150 }
1151 
1152 int psp_ta_init_shared_buf(struct psp_context *psp,
1153 				  struct ta_mem_context *mem_ctx)
1154 {
1155 	/*
1156 	 * Allocate 16k of memory aligned to 4k from the frame buffer (local
1157 	 * physical) as shared memory between the TA and the host
1158 	 */
1159 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1160 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1161 				      AMDGPU_GEM_DOMAIN_GTT,
1162 				      &mem_ctx->shared_bo,
1163 				      &mem_ctx->shared_mc_addr,
1164 				      &mem_ctx->shared_buf);
1165 }
1166 
1167 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1168 				       uint32_t ta_cmd_id,
1169 				       uint32_t session_id)
1170 {
1171 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1172 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1173 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1174 }
1175 
1176 int psp_ta_invoke(struct psp_context *psp,
1177 		  uint32_t ta_cmd_id,
1178 		  struct ta_context *context)
1179 {
1180 	int ret;
1181 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1182 
1183 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1184 
1185 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1186 				 psp->fence_buf_mc_addr);
1187 
1188 	context->resp_status = cmd->resp.status;
1189 
1190 	release_psp_cmd_buf(psp);
1191 
1192 	return ret;
1193 }
1194 
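/* Load a trusted application: copy its binary into the PSP private firmware
 * buffer, issue the load command, and record the session id returned by the
 * PSP for later INVOKE/UNLOAD commands.
 */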
1195 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1196 {
1197 	int ret;
1198 	struct psp_gfx_cmd_resp *cmd;
1199 
1200 	cmd = acquire_psp_cmd_buf(psp);
1201 
1202 	psp_copy_fw(psp, context->bin_desc.start_addr,
1203 		    context->bin_desc.size_bytes);
1204 
1205 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1206 
1207 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1208 				 psp->fence_buf_mc_addr);
1209 
1210 	context->resp_status = cmd->resp.status;
1211 
1212 	if (!ret)
1213 		context->session_id = cmd->resp.session_id;
1214 
1215 	release_psp_cmd_buf(psp);
1216 
1217 	return ret;
1218 }
1219 
1220 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1221 {
1222 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1223 }
1224 
1225 int psp_xgmi_terminate(struct psp_context *psp)
1226 {
1227 	int ret;
1228 	struct amdgpu_device *adev = psp->adev;
1229 
1230 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1231 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1232 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1233 	     adev->gmc.xgmi.connected_to_cpu))
1234 		return 0;
1235 
1236 	if (!psp->xgmi_context.context.initialized)
1237 		return 0;
1238 
1239 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1240 
1241 	psp->xgmi_context.context.initialized = false;
1242 
1243 	return ret;
1244 }
1245 
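/* Initialize the XGMI session: optionally load the XGMI TA (allocating its
 * shared buffer on first use), then invoke TA_COMMAND_XGMI__INITIALIZE and
 * record the TA capability flags.
 */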
1246 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1247 {
1248 	struct ta_xgmi_shared_memory *xgmi_cmd;
1249 	int ret;
1250 
1251 	if (!psp->ta_fw ||
1252 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1253 	    !psp->xgmi_context.context.bin_desc.start_addr)
1254 		return -ENOENT;
1255 
1256 	if (!load_ta)
1257 		goto invoke;
1258 
1259 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1260 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1261 
1262 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1263 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1264 		if (ret)
1265 			return ret;
1266 	}
1267 
1268 	/* Load XGMI TA */
1269 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1270 	if (!ret)
1271 		psp->xgmi_context.context.initialized = true;
1272 	else
1273 		return ret;
1274 
1275 invoke:
1276 	/* Initialize XGMI session */
1277 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1278 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1279 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1280 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1281 
1282 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1283 	/* note down the capability flag for XGMI TA */
1284 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1285 
1286 	return ret;
1287 }
1288 
1289 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1290 {
1291 	struct ta_xgmi_shared_memory *xgmi_cmd;
1292 	int ret;
1293 
1294 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1295 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1296 
1297 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1298 
1299 	/* Invoke xgmi ta to get hive id */
1300 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1301 	if (ret)
1302 		return ret;
1303 
1304 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1305 
1306 	return 0;
1307 }
1308 
1309 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1310 {
1311 	struct ta_xgmi_shared_memory *xgmi_cmd;
1312 	int ret;
1313 
1314 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1315 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1316 
1317 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1318 
1319 	/* Invoke xgmi ta to get the node id */
1320 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1321 	if (ret)
1322 		return ret;
1323 
1324 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1325 
1326 	return 0;
1327 }
1328 
1329 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1330 {
1331 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1332 			IP_VERSION(13, 0, 2) &&
1333 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1334 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1335 		       IP_VERSION(13, 0, 6);
1336 }
1337 
1338 /*
1339  * Chips that support extended topology information require the driver to
1340  * reflect topology information in the opposite direction.  This is
1341  * because the TA has already exceeded its link record limit and if the
1342  * TA holds bi-directional information, the driver would have to do
1343  * multiple fetches instead of just two.
1344  */
1345 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1346 					struct psp_xgmi_node_info node_info)
1347 {
1348 	struct amdgpu_device *mirror_adev;
1349 	struct amdgpu_hive_info *hive;
1350 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1351 	uint64_t dst_node_id = node_info.node_id;
1352 	uint8_t dst_num_hops = node_info.num_hops;
1353 	uint8_t dst_num_links = node_info.num_links;
1354 
1355 	hive = amdgpu_get_xgmi_hive(psp->adev);
1356 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1357 		struct psp_xgmi_topology_info *mirror_top_info;
1358 		int j;
1359 
1360 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1361 			continue;
1362 
1363 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1364 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1365 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1366 				continue;
1367 
1368 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1369 			/*
1370 			 * Prevent re-reflection of a 0 num_links value, since the
1371 			 * reflection criteria are based on num_hops (direct or
1372 			 * indirect).
1373 			 */
1374 			if (dst_num_links)
1375 				mirror_top_info->nodes[j].num_links = dst_num_links;
1376 
1377 			break;
1378 		}
1379 
1380 		break;
1381 	}
1382 
1383 	amdgpu_put_xgmi_hive(hive);
1384 }
1385 
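/* Query XGMI topology: send the known node ids to the XGMI TA with
 * GET_TOPOLOGY_INFO, then, when peer link info is supported, follow up with
 * GET_PEER_LINKS or GET_EXTEND_PEER_LINKS to fill in link counts (and port
 * numbers), reflecting the result to peer devices where required.
 */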
1386 int psp_xgmi_get_topology_info(struct psp_context *psp,
1387 			       int number_devices,
1388 			       struct psp_xgmi_topology_info *topology,
1389 			       bool get_extended_data)
1390 {
1391 	struct ta_xgmi_shared_memory *xgmi_cmd;
1392 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1393 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1394 	int i;
1395 	int ret;
1396 
1397 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1398 		return -EINVAL;
1399 
1400 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1401 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1402 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1403 
1404 	/* Fill in the shared memory with topology information as input */
1405 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1406 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1407 	topology_info_input->num_nodes = number_devices;
1408 
1409 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1410 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1411 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1412 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1413 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1414 	}
1415 
1416 	/* Invoke xgmi ta to get the topology information */
1417 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1418 	if (ret)
1419 		return ret;
1420 
1421 	/* Read the output topology information from the shared memory */
1422 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1423 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1424 	for (i = 0; i < topology->num_nodes; i++) {
1425 		/* extended data will either be 0 or equal to non-extended data */
1426 		if (topology_info_output->nodes[i].num_hops)
1427 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1428 
1429 		/* non-extended data gets everything here so no need to update */
1430 		if (!get_extended_data) {
1431 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1432 			topology->nodes[i].is_sharing_enabled =
1433 					topology_info_output->nodes[i].is_sharing_enabled;
1434 			topology->nodes[i].sdma_engine =
1435 					topology_info_output->nodes[i].sdma_engine;
1436 		}
1437 
1438 	}
1439 
1440 	/* Invoke xgmi ta again to get the link information */
1441 	if (psp_xgmi_peer_link_info_supported(psp)) {
1442 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1443 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1444 		bool requires_reflection =
1445 			(psp->xgmi_context.supports_extended_data &&
1446 			 get_extended_data) ||
1447 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1448 				IP_VERSION(13, 0, 6);
1449 		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1450 				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1451 
1452 		/* Populate the shared output buffer, rather than the cmd input buffer,
1453 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1454 		 * This is required by the xgmi ta implementation of GET_PEER_LINKS.
1455 		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
1456 		 */
1457 		if (ta_port_num_support) {
1458 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1459 
1460 			for (i = 0; i < topology->num_nodes; i++)
1461 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1462 
1463 			link_extend_info_output->num_nodes = topology->num_nodes;
1464 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1465 		} else {
1466 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1467 
1468 			for (i = 0; i < topology->num_nodes; i++)
1469 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1470 
1471 			link_info_output->num_nodes = topology->num_nodes;
1472 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1473 		}
1474 
1475 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1476 		if (ret)
1477 			return ret;
1478 
1479 		for (i = 0; i < topology->num_nodes; i++) {
1480 			uint8_t node_num_links = ta_port_num_support ?
1481 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1482 			/* accumulate num_links on extended data */
1483 			if (get_extended_data) {
1484 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1485 			} else {
1486 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1487 								topology->nodes[i].num_links : node_num_links;
1488 			}
1489 			/* populate the connected port num info if supported and available */
1490 			if (ta_port_num_support && topology->nodes[i].num_links) {
1491 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1492 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1493 			}
1494 
1495 			/* reflect the topology information for bi-directionality */
1496 			if (requires_reflection && topology->nodes[i].num_hops)
1497 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1498 		}
1499 	}
1500 
1501 	return 0;
1502 }
1503 
1504 int psp_xgmi_set_topology_info(struct psp_context *psp,
1505 			       int number_devices,
1506 			       struct psp_xgmi_topology_info *topology)
1507 {
1508 	struct ta_xgmi_shared_memory *xgmi_cmd;
1509 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1510 	int i;
1511 
1512 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1513 		return -EINVAL;
1514 
1515 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1516 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1517 
1518 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1519 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1520 	topology_info_input->num_nodes = number_devices;
1521 
1522 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1523 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1524 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1525 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1526 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1527 	}
1528 
1529 	/* Invoke xgmi ta to set topology information */
1530 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1531 }
1532 
1533 // ras begin
1534 static void psp_ras_ta_check_status(struct psp_context *psp)
1535 {
1536 	struct ta_ras_shared_memory *ras_cmd =
1537 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1538 
1539 	switch (ras_cmd->ras_status) {
1540 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1541 		dev_warn(psp->adev->dev,
1542 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1543 		break;
1544 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1545 		dev_warn(psp->adev->dev,
1546 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1547 		break;
1548 	case TA_RAS_STATUS__SUCCESS:
1549 		break;
1550 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1551 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1552 			dev_warn(psp->adev->dev,
1553 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1554 		break;
1555 	default:
1556 		dev_warn(psp->adev->dev,
1557 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1558 		break;
1559 	}
1560 }
1561 
1562 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1563 {
1564 	struct ta_ras_shared_memory *ras_cmd;
1565 	int ret;
1566 
1567 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1568 
1569 	/*
1570 	 * TODO: bypass the loading in sriov for now
1571 	 */
1572 	if (amdgpu_sriov_vf(psp->adev))
1573 		return 0;
1574 
1575 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1576 
1577 	if (amdgpu_ras_intr_triggered())
1578 		return ret;
1579 
1580 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1581 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1582 		return -EINVAL;
1583 	}
1584 
1585 	if (!ret) {
1586 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1587 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1588 
1589 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1590 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1591 			dev_warn(psp->adev->dev,
1592 				 "RAS internal register access blocked\n");
1593 
1594 		psp_ras_ta_check_status(psp);
1595 	}
1596 
1597 	return ret;
1598 }
1599 
1600 int psp_ras_enable_features(struct psp_context *psp,
1601 		union ta_ras_cmd_input *info, bool enable)
1602 {
1603 	struct ta_ras_shared_memory *ras_cmd;
1604 	int ret;
1605 
1606 	if (!psp->ras_context.context.initialized)
1607 		return -EINVAL;
1608 
1609 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1610 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1611 
1612 	if (enable)
1613 		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1614 	else
1615 		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1616 
1617 	ras_cmd->ras_in_message = *info;
1618 
1619 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1620 	if (ret)
1621 		return -EINVAL;
1622 
1623 	return 0;
1624 }
1625 
1626 int psp_ras_terminate(struct psp_context *psp)
1627 {
1628 	int ret;
1629 
1630 	/*
1631 	 * TODO: bypass the terminate in sriov for now
1632 	 */
1633 	if (amdgpu_sriov_vf(psp->adev))
1634 		return 0;
1635 
1636 	if (!psp->ras_context.context.initialized)
1637 		return 0;
1638 
1639 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1640 
1641 	psp->ras_context.context.initialized = false;
1642 
1643 	return ret;
1644 }
1645 
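/* Load the RAS TA: reconcile the GECC setting in the boot config with the
 * requested RAS support, allocate the TA shared buffer, fill in the init
 * flags (poison mode, dGPU mode, xcc mask, disabled channel count) and load
 * the TA binary.
 */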
1646 int psp_ras_initialize(struct psp_context *psp)
1647 {
1648 	int ret;
1649 	uint32_t boot_cfg = 0xFF;
1650 	struct amdgpu_device *adev = psp->adev;
1651 	struct ta_ras_shared_memory *ras_cmd;
1652 
1653 	/*
1654 	 * TODO: bypass the initialize in sriov for now
1655 	 */
1656 	if (amdgpu_sriov_vf(adev))
1657 		return 0;
1658 
1659 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1660 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1661 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1662 		return 0;
1663 	}
1664 
1665 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1666 		/* query GECC enablement status from boot config
1667 		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1668 		 */
1669 		ret = psp_boot_config_get(adev, &boot_cfg);
1670 		if (ret)
1671 			dev_warn(adev->dev, "PSP get boot config failed\n");
1672 
1673 		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1674 			if (!boot_cfg) {
1675 				dev_info(adev->dev, "GECC is disabled\n");
1676 			} else {
1677 				/* disable GECC in next boot cycle if ras is
1678 				 * disabled by module parameter amdgpu_ras_enable
1679 				 * and/or amdgpu_ras_mask, or if the
1680 				 * boot_config_get call failed
1681 				 */
1682 				ret = psp_boot_config_set(adev, 0);
1683 				if (ret)
1684 					dev_warn(adev->dev, "PSP set boot config failed\n");
1685 				else
1686 					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1687 			}
1688 		} else {
1689 			if (boot_cfg == 1) {
1690 				dev_info(adev->dev, "GECC is enabled\n");
1691 			} else {
1692 				/* enable GECC in next boot cycle if it is disabled
1693 				 * in boot config, or force enable GECC if failed to
1694 				 * get boot configuration
1695 				 */
1696 				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1697 				if (ret)
1698 					dev_warn(adev->dev, "PSP set boot config failed\n");
1699 				else
1700 					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1701 			}
1702 		}
1703 	}
1704 
1705 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1706 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1707 
1708 	if (!psp->ras_context.context.mem_context.shared_buf) {
1709 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1710 		if (ret)
1711 			return ret;
1712 	}
1713 
1714 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1715 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1716 
1717 	if (amdgpu_ras_is_poison_mode_supported(adev))
1718 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1719 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1720 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1721 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1722 		adev->gfx.xcc_mask;
1723 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1724 
1725 	ret = psp_ta_load(psp, &psp->ras_context.context);
1726 
1727 	if (!ret && !ras_cmd->ras_status)
1728 		psp->ras_context.context.initialized = true;
1729 	else {
1730 		if (ras_cmd->ras_status)
1731 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1732 
1733 		/* fail to load RAS TA */
1734 		psp->ras_context.context.initialized = false;
1735 	}
1736 
1737 	return ret;
1738 }
1739 
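/* Ask the RAS TA to inject an error: the requested instance mask is encoded
 * into the upper bits of sub_block_index (kept for backward compatibility)
 * before TRIGGER_ERROR is invoked.
 */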
1740 int psp_ras_trigger_error(struct psp_context *psp,
1741 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1742 {
1743 	struct ta_ras_shared_memory *ras_cmd;
1744 	struct amdgpu_device *adev = psp->adev;
1745 	int ret;
1746 	uint32_t dev_mask;
1747 
1748 	if (!psp->ras_context.context.initialized)
1749 		return -EINVAL;
1750 
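	/* Translate the generic instance mask into the device mask of the
	 * IP block the error is injected into.
	 */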
1751 	switch (info->block_id) {
1752 	case TA_RAS_BLOCK__GFX:
1753 		dev_mask = GET_MASK(GC, instance_mask);
1754 		break;
1755 	case TA_RAS_BLOCK__SDMA:
1756 		dev_mask = GET_MASK(SDMA0, instance_mask);
1757 		break;
1758 	case TA_RAS_BLOCK__VCN:
1759 	case TA_RAS_BLOCK__JPEG:
1760 		dev_mask = GET_MASK(VCN, instance_mask);
1761 		break;
1762 	default:
1763 		dev_mask = instance_mask;
1764 		break;
1765 	}
1766 
1767 	/* reuse sub_block_index for backward compatibility */
1768 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1769 	dev_mask &= AMDGPU_RAS_INST_MASK;
1770 	info->sub_block_index |= dev_mask;
1771 
1772 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1773 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1774 
1775 	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1776 	ras_cmd->ras_in_message.trigger_error = *info;
1777 
1778 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1779 	if (ret)
1780 		return -EINVAL;
1781 
1782 	/* If err_event_athub occurs, the error injection was successful;
1783 	 * however, the return status from the TA is no longer reliable
1784 	 */
1785 	if (amdgpu_ras_intr_triggered())
1786 		return 0;
1787 
1788 	if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1789 		return -EACCES;
1790 	else if (ras_cmd->ras_status)
1791 		return -EINVAL;
1792 
1793 	return 0;
1794 }
1795 
1796 int psp_ras_query_address(struct psp_context *psp,
1797 			  struct ta_ras_query_address_input *addr_in,
1798 			  struct ta_ras_query_address_output *addr_out)
1799 {
1800 	struct ta_ras_shared_memory *ras_cmd;
1801 	int ret;
1802 
1803 	if (!psp->ras_context.context.initialized)
1804 		return -EINVAL;
1805 
1806 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1807 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1808 
1809 	ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
1810 	ras_cmd->ras_in_message.address = *addr_in;
1811 
1812 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1813 	if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1814 		return -EINVAL;
1815 
1816 	*addr_out = ras_cmd->ras_out_message.address;
1817 
1818 	return 0;
1819 }
1820 // ras end
1821 
1822 // HDCP start
1823 static int psp_hdcp_initialize(struct psp_context *psp)
1824 {
1825 	int ret;
1826 
1827 	/*
1828 	 * TODO: bypass the initialize in sriov for now
1829 	 */
1830 	if (amdgpu_sriov_vf(psp->adev))
1831 		return 0;
1832 
1833 	/* bypass hdcp initialization if dmu is harvested */
1834 	if (!amdgpu_device_has_display_hardware(psp->adev))
1835 		return 0;
1836 
1837 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1838 	    !psp->hdcp_context.context.bin_desc.start_addr) {
1839 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1840 		return 0;
1841 	}
1842 
1843 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1844 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1845 
1846 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
1847 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1848 		if (ret)
1849 			return ret;
1850 	}
1851 
1852 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
1853 	if (!ret) {
1854 		psp->hdcp_context.context.initialized = true;
1855 		mutex_init(&psp->hdcp_context.mutex);
1856 	}
1857 
1858 	return ret;
1859 }
1860 
1861 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1862 {
1863 	/*
1864 	 * TODO: bypass the loading in sriov for now
1865 	 */
1866 	if (amdgpu_sriov_vf(psp->adev))
1867 		return 0;
1868 
1869 	if (!psp->hdcp_context.context.initialized)
1870 		return 0;
1871 
1872 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1873 }
1874 
1875 static int psp_hdcp_terminate(struct psp_context *psp)
1876 {
1877 	int ret;
1878 
1879 	/*
1880 	 * TODO: bypass the terminate in sriov for now
1881 	 */
1882 	if (amdgpu_sriov_vf(psp->adev))
1883 		return 0;
1884 
1885 	if (!psp->hdcp_context.context.initialized)
1886 		return 0;
1887 
1888 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1889 
1890 	psp->hdcp_context.context.initialized = false;
1891 
1892 	return ret;
1893 }
1894 // HDCP end
1895 
1896 // DTM start
1897 static int psp_dtm_initialize(struct psp_context *psp)
1898 {
1899 	int ret;
1900 
1901 	/*
1902 	 * TODO: bypass the initialize in sriov for now
1903 	 */
1904 	if (amdgpu_sriov_vf(psp->adev))
1905 		return 0;
1906 
1907 	/* bypass dtm initialization if dmu is harvested */
1908 	if (!amdgpu_device_has_display_hardware(psp->adev))
1909 		return 0;
1910 
1911 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
1912 	    !psp->dtm_context.context.bin_desc.start_addr) {
1913 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1914 		return 0;
1915 	}
1916 
1917 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1918 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1919 
1920 	if (!psp->dtm_context.context.mem_context.shared_buf) {
1921 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1922 		if (ret)
1923 			return ret;
1924 	}
1925 
1926 	ret = psp_ta_load(psp, &psp->dtm_context.context);
1927 	if (!ret) {
1928 		psp->dtm_context.context.initialized = true;
1929 		mutex_init(&psp->dtm_context.mutex);
1930 	}
1931 
1932 	return ret;
1933 }
1934 
1935 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1936 {
1937 	/*
1938 	 * TODO: bypass the loading in sriov for now
1939 	 */
1940 	if (amdgpu_sriov_vf(psp->adev))
1941 		return 0;
1942 
1943 	if (!psp->dtm_context.context.initialized)
1944 		return 0;
1945 
1946 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
1947 }
1948 
1949 static int psp_dtm_terminate(struct psp_context *psp)
1950 {
1951 	int ret;
1952 
1953 	/*
1954 	 * TODO: bypass the terminate in sriov for now
1955 	 */
1956 	if (amdgpu_sriov_vf(psp->adev))
1957 		return 0;
1958 
1959 	if (!psp->dtm_context.context.initialized)
1960 		return 0;
1961 
1962 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
1963 
1964 	psp->dtm_context.context.initialized = false;
1965 
1966 	return ret;
1967 }
1968 // DTM end
1969 
1970 // RAP start
1971 static int psp_rap_initialize(struct psp_context *psp)
1972 {
1973 	int ret;
1974 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
1975 
1976 	/*
1977 	 * TODO: bypass the initialize in sriov for now
1978 	 */
1979 	if (amdgpu_sriov_vf(psp->adev))
1980 		return 0;
1981 
1982 	if (!psp->rap_context.context.bin_desc.size_bytes ||
1983 	    !psp->rap_context.context.bin_desc.start_addr) {
1984 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
1985 		return 0;
1986 	}
1987 
1988 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
1989 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1990 
1991 	if (!psp->rap_context.context.mem_context.shared_buf) {
1992 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
1993 		if (ret)
1994 			return ret;
1995 	}
1996 
1997 	ret = psp_ta_load(psp, &psp->rap_context.context);
1998 	if (!ret) {
1999 		psp->rap_context.context.initialized = true;
2000 		mutex_init(&psp->rap_context.mutex);
2001 	} else
2002 		return ret;
2003 
2004 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2005 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2006 		psp_rap_terminate(psp);
2007 		/* free rap shared memory */
2008 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2009 
2010 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2011 			 ret, status);
2012 
2013 		return ret;
2014 	}
2015 
2016 	return 0;
2017 }
2018 
2019 static int psp_rap_terminate(struct psp_context *psp)
2020 {
2021 	int ret;
2022 
2023 	if (!psp->rap_context.context.initialized)
2024 		return 0;
2025 
2026 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2027 
2028 	psp->rap_context.context.initialized = false;
2029 
2030 	return ret;
2031 }
2032 
2033 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2034 {
2035 	struct ta_rap_shared_memory *rap_cmd;
2036 	int ret = 0;
2037 
2038 	if (!psp->rap_context.context.initialized)
2039 		return 0;
2040 
2041 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2042 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2043 		return -EINVAL;
2044 
2045 	mutex_lock(&psp->rap_context.mutex);
2046 
2047 	rap_cmd = (struct ta_rap_shared_memory *)
2048 		  psp->rap_context.context.mem_context.shared_buf;
2049 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2050 
2051 	rap_cmd->cmd_id = ta_cmd_id;
2052 	rap_cmd->validation_method_id = METHOD_A;
2053 
2054 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2055 	if (ret)
2056 		goto out_unlock;
2057 
2058 	if (status)
2059 		*status = rap_cmd->rap_status;
2060 
2061 out_unlock:
2062 	mutex_unlock(&psp->rap_context.mutex);
2063 
2064 	return ret;
2065 }
2066 // RAP end
2067 
2068 /* securedisplay start */
2069 static int psp_securedisplay_initialize(struct psp_context *psp)
2070 {
2071 	int ret;
2072 	struct ta_securedisplay_cmd *securedisplay_cmd;
2073 
2074 	/*
2075 	 * TODO: bypass the initialize in sriov for now
2076 	 */
2077 	if (amdgpu_sriov_vf(psp->adev))
2078 		return 0;
2079 
2080 	/* bypass securedisplay initialization if dmu is harvested */
2081 	if (!amdgpu_device_has_display_hardware(psp->adev))
2082 		return 0;
2083 
2084 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2085 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2086 		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2087 		return 0;
2088 	}
2089 
2090 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2091 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2092 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2093 
2094 	if (!psp->securedisplay_context.context.initialized) {
2095 		ret = psp_ta_init_shared_buf(psp,
2096 					     &psp->securedisplay_context.context.mem_context);
2097 		if (ret)
2098 			return ret;
2099 	}
2100 
2101 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2102 	if (!ret) {
2103 		psp->securedisplay_context.context.initialized = true;
2104 		mutex_init(&psp->securedisplay_context.mutex);
2105 	} else
2106 		return ret;
2107 
2108 	mutex_lock(&psp->securedisplay_context.mutex);
2109 
2110 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2111 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2112 
2113 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2114 
2115 	mutex_unlock(&psp->securedisplay_context.mutex);
2116 
2117 	if (ret) {
2118 		psp_securedisplay_terminate(psp);
2119 		/* free securedisplay shared memory */
2120 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2121 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2122 		return -EINVAL;
2123 	}
2124 
2125 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2126 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2127 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2128 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2129 		/* don't try again */
2130 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2131 	}
2132 
2133 	return 0;
2134 }
2135 
2136 static int psp_securedisplay_terminate(struct psp_context *psp)
2137 {
2138 	int ret;
2139 
2140 	/*
2141 	 * TODO: bypass the terminate in sriov for now
2142 	 */
2143 	if (amdgpu_sriov_vf(psp->adev))
2144 		return 0;
2145 
2146 	if (!psp->securedisplay_context.context.initialized)
2147 		return 0;
2148 
2149 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2150 
2151 	psp->securedisplay_context.context.initialized = false;
2152 
2153 	return ret;
2154 }
2155 
2156 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2157 {
2158 	int ret;
2159 
2160 	if (!psp->securedisplay_context.context.initialized)
2161 		return -EINVAL;
2162 
2163 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2164 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2165 		return -EINVAL;
2166 
2167 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2168 
2169 	return ret;
2170 }
2171 /* SECUREDISPLAY end */
2172 
2173 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2174 {
2175 	struct psp_context *psp = &adev->psp;
2176 	int ret = 0;
2177 
2178 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2179 		ret = psp->funcs->wait_for_bootloader(psp);
2180 
2181 	return ret;
2182 }
2183 
2184 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2185 {
2186 	if (psp->funcs &&
2187 	    psp->funcs->get_ras_capability) {
2188 		return psp->funcs->get_ras_capability(psp);
2189 	} else {
2190 		return false;
2191 	}
2192 }
2193 
2194 static int psp_hw_start(struct psp_context *psp)
2195 {
2196 	struct amdgpu_device *adev = psp->adev;
2197 	int ret;
2198 
2199 	if (!amdgpu_sriov_vf(adev)) {
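		/* On bare metal, load the PSP bootloader components in
		 * sequence (KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV,
		 * RAS_DRV, SOS), each only if the image is present and the
		 * IP-specific callback is implemented.  Under SR-IOV the
		 * bootloader is handled by the host, so this whole block
		 * is skipped.
		 */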
2200 		if ((is_psp_fw_valid(psp->kdb)) &&
2201 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2202 			ret = psp_bootloader_load_kdb(psp);
2203 			if (ret) {
2204 				dev_err(adev->dev, "PSP load kdb failed!\n");
2205 				return ret;
2206 			}
2207 		}
2208 
2209 		if ((is_psp_fw_valid(psp->spl)) &&
2210 		    (psp->funcs->bootloader_load_spl != NULL)) {
2211 			ret = psp_bootloader_load_spl(psp);
2212 			if (ret) {
2213 				dev_err(adev->dev, "PSP load spl failed!\n");
2214 				return ret;
2215 			}
2216 		}
2217 
2218 		if ((is_psp_fw_valid(psp->sys)) &&
2219 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2220 			ret = psp_bootloader_load_sysdrv(psp);
2221 			if (ret) {
2222 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2223 				return ret;
2224 			}
2225 		}
2226 
2227 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2228 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2229 			ret = psp_bootloader_load_soc_drv(psp);
2230 			if (ret) {
2231 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2232 				return ret;
2233 			}
2234 		}
2235 
2236 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2237 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2238 			ret = psp_bootloader_load_intf_drv(psp);
2239 			if (ret) {
2240 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2241 				return ret;
2242 			}
2243 		}
2244 
2245 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2246 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2247 			ret = psp_bootloader_load_dbg_drv(psp);
2248 			if (ret) {
2249 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2250 				return ret;
2251 			}
2252 		}
2253 
2254 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2255 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2256 			ret = psp_bootloader_load_ras_drv(psp);
2257 			if (ret) {
2258 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2259 				return ret;
2260 			}
2261 		}
2262 
2263 		if ((is_psp_fw_valid(psp->sos)) &&
2264 		    (psp->funcs->bootloader_load_sos != NULL)) {
2265 			ret = psp_bootloader_load_sos(psp);
2266 			if (ret) {
2267 				dev_err(adev->dev, "PSP load sos failed!\n");
2268 				return ret;
2269 			}
2270 		}
2271 	}
2272 
2273 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2274 	if (ret) {
2275 		dev_err(adev->dev, "PSP create ring failed!\n");
2276 		return ret;
2277 	}
2278 
2279 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2280 		goto skip_pin_bo;
2281 
2282 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2283 		ret = psp_tmr_init(psp);
2284 		if (ret) {
2285 			dev_err(adev->dev, "PSP tmr init failed!\n");
2286 			return ret;
2287 		}
2288 	}
2289 
2290 skip_pin_bo:
2291 	/*
2292 	 * For ASICs with DF Cstate management centralized
2293 	 * to PMFW, TMR setup should be performed after PMFW
2294 	 * is loaded and before other non-PSP firmware is loaded.
2295 	 */
2296 	if (psp->pmfw_centralized_cstate_management) {
2297 		ret = psp_load_smu_fw(psp);
2298 		if (ret)
2299 			return ret;
2300 	}
2301 
2302 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2303 		ret = psp_tmr_load(psp);
2304 		if (ret) {
2305 			dev_err(adev->dev, "PSP load tmr failed!\n");
2306 			return ret;
2307 		}
2308 	}
2309 
2310 	return 0;
2311 }
2312 
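/* Map a driver AMDGPU_UCODE_ID_* onto the PSP GFX_FW_TYPE_* carried in the
 * LOAD_IP_FW command.
 */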
2313 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2314 			   enum psp_gfx_fw_type *type)
2315 {
2316 	switch (ucode->ucode_id) {
2317 	case AMDGPU_UCODE_ID_CAP:
2318 		*type = GFX_FW_TYPE_CAP;
2319 		break;
2320 	case AMDGPU_UCODE_ID_SDMA0:
2321 		*type = GFX_FW_TYPE_SDMA0;
2322 		break;
2323 	case AMDGPU_UCODE_ID_SDMA1:
2324 		*type = GFX_FW_TYPE_SDMA1;
2325 		break;
2326 	case AMDGPU_UCODE_ID_SDMA2:
2327 		*type = GFX_FW_TYPE_SDMA2;
2328 		break;
2329 	case AMDGPU_UCODE_ID_SDMA3:
2330 		*type = GFX_FW_TYPE_SDMA3;
2331 		break;
2332 	case AMDGPU_UCODE_ID_SDMA4:
2333 		*type = GFX_FW_TYPE_SDMA4;
2334 		break;
2335 	case AMDGPU_UCODE_ID_SDMA5:
2336 		*type = GFX_FW_TYPE_SDMA5;
2337 		break;
2338 	case AMDGPU_UCODE_ID_SDMA6:
2339 		*type = GFX_FW_TYPE_SDMA6;
2340 		break;
2341 	case AMDGPU_UCODE_ID_SDMA7:
2342 		*type = GFX_FW_TYPE_SDMA7;
2343 		break;
2344 	case AMDGPU_UCODE_ID_CP_MES:
2345 		*type = GFX_FW_TYPE_CP_MES;
2346 		break;
2347 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2348 		*type = GFX_FW_TYPE_MES_STACK;
2349 		break;
2350 	case AMDGPU_UCODE_ID_CP_MES1:
2351 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2352 		break;
2353 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2354 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2355 		break;
2356 	case AMDGPU_UCODE_ID_CP_CE:
2357 		*type = GFX_FW_TYPE_CP_CE;
2358 		break;
2359 	case AMDGPU_UCODE_ID_CP_PFP:
2360 		*type = GFX_FW_TYPE_CP_PFP;
2361 		break;
2362 	case AMDGPU_UCODE_ID_CP_ME:
2363 		*type = GFX_FW_TYPE_CP_ME;
2364 		break;
2365 	case AMDGPU_UCODE_ID_CP_MEC1:
2366 		*type = GFX_FW_TYPE_CP_MEC;
2367 		break;
2368 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2369 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2370 		break;
2371 	case AMDGPU_UCODE_ID_CP_MEC2:
2372 		*type = GFX_FW_TYPE_CP_MEC;
2373 		break;
2374 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2375 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2376 		break;
2377 	case AMDGPU_UCODE_ID_RLC_P:
2378 		*type = GFX_FW_TYPE_RLC_P;
2379 		break;
2380 	case AMDGPU_UCODE_ID_RLC_V:
2381 		*type = GFX_FW_TYPE_RLC_V;
2382 		break;
2383 	case AMDGPU_UCODE_ID_RLC_G:
2384 		*type = GFX_FW_TYPE_RLC_G;
2385 		break;
2386 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2387 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2388 		break;
2389 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2390 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2391 		break;
2392 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2393 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2394 		break;
2395 	case AMDGPU_UCODE_ID_RLC_IRAM:
2396 		*type = GFX_FW_TYPE_RLC_IRAM;
2397 		break;
2398 	case AMDGPU_UCODE_ID_RLC_DRAM:
2399 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2400 		break;
2401 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2402 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2403 		break;
2404 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2405 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2406 		break;
2407 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2408 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2409 		break;
2410 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2411 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2412 		break;
2413 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2414 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2415 		break;
2416 	case AMDGPU_UCODE_ID_SMC:
2417 		*type = GFX_FW_TYPE_SMU;
2418 		break;
2419 	case AMDGPU_UCODE_ID_PPTABLE:
2420 		*type = GFX_FW_TYPE_PPTABLE;
2421 		break;
2422 	case AMDGPU_UCODE_ID_UVD:
2423 		*type = GFX_FW_TYPE_UVD;
2424 		break;
2425 	case AMDGPU_UCODE_ID_UVD1:
2426 		*type = GFX_FW_TYPE_UVD1;
2427 		break;
2428 	case AMDGPU_UCODE_ID_VCE:
2429 		*type = GFX_FW_TYPE_VCE;
2430 		break;
2431 	case AMDGPU_UCODE_ID_VCN:
2432 		*type = GFX_FW_TYPE_VCN;
2433 		break;
2434 	case AMDGPU_UCODE_ID_VCN1:
2435 		*type = GFX_FW_TYPE_VCN1;
2436 		break;
2437 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2438 		*type = GFX_FW_TYPE_DMCU_ERAM;
2439 		break;
2440 	case AMDGPU_UCODE_ID_DMCU_INTV:
2441 		*type = GFX_FW_TYPE_DMCU_ISR;
2442 		break;
2443 	case AMDGPU_UCODE_ID_VCN0_RAM:
2444 		*type = GFX_FW_TYPE_VCN0_RAM;
2445 		break;
2446 	case AMDGPU_UCODE_ID_VCN1_RAM:
2447 		*type = GFX_FW_TYPE_VCN1_RAM;
2448 		break;
2449 	case AMDGPU_UCODE_ID_DMCUB:
2450 		*type = GFX_FW_TYPE_DMUB;
2451 		break;
2452 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2453 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2454 		break;
2455 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2456 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2457 		break;
2458 	case AMDGPU_UCODE_ID_IMU_I:
2459 		*type = GFX_FW_TYPE_IMU_I;
2460 		break;
2461 	case AMDGPU_UCODE_ID_IMU_D:
2462 		*type = GFX_FW_TYPE_IMU_D;
2463 		break;
2464 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2465 		*type = GFX_FW_TYPE_RS64_PFP;
2466 		break;
2467 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2468 		*type = GFX_FW_TYPE_RS64_ME;
2469 		break;
2470 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2471 		*type = GFX_FW_TYPE_RS64_MEC;
2472 		break;
2473 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2474 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2475 		break;
2476 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2477 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2478 		break;
2479 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2480 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2481 		break;
2482 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2483 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2484 		break;
2485 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2486 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2487 		break;
2488 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2489 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2490 		break;
2491 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2492 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2493 		break;
2494 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2495 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2496 		break;
2497 	case AMDGPU_UCODE_ID_VPE_CTX:
2498 		*type = GFX_FW_TYPE_VPEC_FW1;
2499 		break;
2500 	case AMDGPU_UCODE_ID_VPE_CTL:
2501 		*type = GFX_FW_TYPE_VPEC_FW2;
2502 		break;
2503 	case AMDGPU_UCODE_ID_VPE:
2504 		*type = GFX_FW_TYPE_VPE;
2505 		break;
2506 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2507 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2508 		break;
2509 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2510 		*type = GFX_FW_TYPE_UMSCH_DATA;
2511 		break;
2512 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2513 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2514 		break;
2515 	case AMDGPU_UCODE_ID_P2S_TABLE:
2516 		*type = GFX_FW_TYPE_P2S_TABLE;
2517 		break;
2518 	case AMDGPU_UCODE_ID_JPEG_RAM:
2519 		*type = GFX_FW_TYPE_JPEG_RAM;
2520 		break;
2521 	case AMDGPU_UCODE_ID_MAXIMUM:
2522 	default:
2523 		return -EINVAL;
2524 	}
2525 
2526 	return 0;
2527 }
2528 
2529 static void psp_print_fw_hdr(struct psp_context *psp,
2530 			     struct amdgpu_firmware_info *ucode)
2531 {
2532 	struct amdgpu_device *adev = psp->adev;
2533 	struct common_firmware_header *hdr;
2534 
2535 	switch (ucode->ucode_id) {
2536 	case AMDGPU_UCODE_ID_SDMA0:
2537 	case AMDGPU_UCODE_ID_SDMA1:
2538 	case AMDGPU_UCODE_ID_SDMA2:
2539 	case AMDGPU_UCODE_ID_SDMA3:
2540 	case AMDGPU_UCODE_ID_SDMA4:
2541 	case AMDGPU_UCODE_ID_SDMA5:
2542 	case AMDGPU_UCODE_ID_SDMA6:
2543 	case AMDGPU_UCODE_ID_SDMA7:
2544 		hdr = (struct common_firmware_header *)
2545 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2546 		amdgpu_ucode_print_sdma_hdr(hdr);
2547 		break;
2548 	case AMDGPU_UCODE_ID_CP_CE:
2549 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2550 		amdgpu_ucode_print_gfx_hdr(hdr);
2551 		break;
2552 	case AMDGPU_UCODE_ID_CP_PFP:
2553 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2554 		amdgpu_ucode_print_gfx_hdr(hdr);
2555 		break;
2556 	case AMDGPU_UCODE_ID_CP_ME:
2557 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2558 		amdgpu_ucode_print_gfx_hdr(hdr);
2559 		break;
2560 	case AMDGPU_UCODE_ID_CP_MEC1:
2561 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2562 		amdgpu_ucode_print_gfx_hdr(hdr);
2563 		break;
2564 	case AMDGPU_UCODE_ID_RLC_G:
2565 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2566 		amdgpu_ucode_print_rlc_hdr(hdr);
2567 		break;
2568 	case AMDGPU_UCODE_ID_SMC:
2569 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2570 		amdgpu_ucode_print_smc_hdr(hdr);
2571 		break;
2572 	default:
2573 		break;
2574 	}
2575 }
2576 
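/* Build a GFX_CMD_ID_LOAD_IP_FW command: the firmware's GPU address split
 * into low/high 32 bits, its size and its PSP firmware type.
 */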
2577 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2578 				       struct amdgpu_firmware_info *ucode,
2579 				       struct psp_gfx_cmd_resp *cmd)
2580 {
2581 	int ret;
2582 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2583 
2584 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2585 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2586 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2587 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2588 
2589 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2590 	if (ret)
2591 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2592 
2593 	return ret;
2594 }
2595 
2596 int psp_execute_ip_fw_load(struct psp_context *psp,
2597 			   struct amdgpu_firmware_info *ucode)
2598 {
2599 	int ret = 0;
2600 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2601 
2602 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2603 	if (!ret) {
2604 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2605 					 psp->fence_buf_mc_addr);
2606 	}
2607 
2608 	release_psp_cmd_buf(psp);
2609 
2610 	return ret;
2611 }
2612 
2613 static int psp_load_p2s_table(struct psp_context *psp)
2614 {
2615 	int ret;
2616 	struct amdgpu_device *adev = psp->adev;
2617 	struct amdgpu_firmware_info *ucode =
2618 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2619 
2620 	if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2621 		return 0;
2622 
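	/* For MP0 13.0.6, only load the P2S table when the SOS firmware is
	 * recent enough; the minimum version differs between APU and dGPU.
	 */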
2623 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
2624 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2625 								0x0036003C;
2626 		if (psp->sos.fw_version < supp_vers)
2627 			return 0;
2628 	}
2629 
2630 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2631 		return 0;
2632 
2633 	ret = psp_execute_ip_fw_load(psp, ucode);
2634 
2635 	return ret;
2636 }
2637 
2638 static int psp_load_smu_fw(struct psp_context *psp)
2639 {
2640 	int ret;
2641 	struct amdgpu_device *adev = psp->adev;
2642 	struct amdgpu_firmware_info *ucode =
2643 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2644 	struct amdgpu_ras *ras = psp->ras_context.ras;
2645 
2646 	/*
2647 	 * Skip SMU FW reloading when resuming from runtime PM with BACO,
2648 	 * as the SMU is always alive in that case.
2649 	 */
2650 	if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2651 		return 0;
2652 
2653 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2654 		return 0;
2655 
2656 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2657 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2658 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2659 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2660 		if (ret)
2661 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2662 	}
2663 
2664 	ret = psp_execute_ip_fw_load(psp, ucode);
2665 
2666 	if (ret)
2667 		dev_err(adev->dev, "PSP load smu failed!\n");
2668 
2669 	return ret;
2670 }
2671 
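/* Return true when a ucode entry should not be loaded through PSP here:
 * empty images, the P2S table (loaded separately), SMC when the reload
 * quirk, autoload or PMFW-centralized cstate management applies, firmware
 * skipped under SR-IOV, and MEC JT entries when RLC autoload is enabled.
 */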
2672 static bool fw_load_skip_check(struct psp_context *psp,
2673 			       struct amdgpu_firmware_info *ucode)
2674 {
2675 	if (!ucode->fw || !ucode->ucode_size)
2676 		return true;
2677 
2678 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2679 		return true;
2680 
2681 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2682 	    (psp_smu_reload_quirk(psp) ||
2683 	     psp->autoload_supported ||
2684 	     psp->pmfw_centralized_cstate_management))
2685 		return true;
2686 
2687 	if (amdgpu_sriov_vf(psp->adev) &&
2688 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2689 		return true;
2690 
2691 	if (psp->autoload_supported &&
2692 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2693 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2694 		/* skip mec JT when autoload is enabled */
2695 		return true;
2696 
2697 	return false;
2698 }
2699 
2700 int psp_load_fw_list(struct psp_context *psp,
2701 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
2702 {
2703 	int ret = 0, i;
2704 	struct amdgpu_firmware_info *ucode;
2705 
2706 	for (i = 0; i < ucode_count; ++i) {
2707 		ucode = ucode_list[i];
2708 		psp_print_fw_hdr(psp, ucode);
2709 		ret = psp_execute_ip_fw_load(psp, ucode);
2710 		if (ret)
2711 			return ret;
2712 	}
2713 	return ret;
2714 }
2715 
2716 static int psp_load_non_psp_fw(struct psp_context *psp)
2717 {
2718 	int i, ret;
2719 	struct amdgpu_firmware_info *ucode;
2720 	struct amdgpu_device *adev = psp->adev;
2721 
2722 	if (psp->autoload_supported &&
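	/* With RLC autoload but without PMFW-centralized cstate management,
	 * the SMU firmware is loaded first, ahead of the other IP firmware.
	 */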
2723 	    !psp->pmfw_centralized_cstate_management) {
2724 		ret = psp_load_smu_fw(psp);
2725 		if (ret)
2726 			return ret;
2727 	}
2728 
2729 	/* Load P2S table first if it's available */
2730 	psp_load_p2s_table(psp);
2731 
2732 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
2733 		ucode = &adev->firmware.ucode[i];
2734 
2735 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2736 		    !fw_load_skip_check(psp, ucode)) {
2737 			ret = psp_load_smu_fw(psp);
2738 			if (ret)
2739 				return ret;
2740 			continue;
2741 		}
2742 
2743 		if (fw_load_skip_check(psp, ucode))
2744 			continue;
2745 
2746 		if (psp->autoload_supported &&
2747 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2748 			     IP_VERSION(11, 0, 7) ||
2749 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2750 			     IP_VERSION(11, 0, 11) ||
2751 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2752 			     IP_VERSION(11, 0, 12)) &&
2753 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2754 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2755 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2756 			/* PSP only receives one SDMA fw for sienna_cichlid,
2757 			 * as all four SDMA fw images are the same
2758 			 */
2759 			continue;
2760 
2761 		psp_print_fw_hdr(psp, ucode);
2762 
2763 		ret = psp_execute_ip_fw_load(psp, ucode);
2764 		if (ret)
2765 			return ret;
2766 
2767 		/* Start RLC autoload after the PSP has received all the GFX firmware */
2768 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2769 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2770 			ret = psp_rlc_autoload_start(psp);
2771 			if (ret) {
2772 				dev_err(adev->dev, "Failed to start rlc autoload\n");
2773 				return ret;
2774 			}
2775 		}
2776 	}
2777 
2778 	return 0;
2779 }
2780 
2781 static int psp_load_fw(struct amdgpu_device *adev)
2782 {
2783 	int ret;
2784 	struct psp_context *psp = &adev->psp;
2785 
2786 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2787 		/* should not destroy ring, only stop */
2788 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
2789 	} else {
2790 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2791 
2792 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2793 		if (ret) {
2794 			dev_err(adev->dev, "PSP ring init failed!\n");
2795 			goto failed;
2796 		}
2797 	}
2798 
2799 	ret = psp_hw_start(psp);
2800 	if (ret)
2801 		goto failed;
2802 
2803 	ret = psp_load_non_psp_fw(psp);
2804 	if (ret)
2805 		goto failed1;
2806 
2807 	ret = psp_asd_initialize(psp);
2808 	if (ret) {
2809 		dev_err(adev->dev, "PSP load asd failed!\n");
2810 		goto failed1;
2811 	}
2812 
2813 	ret = psp_rl_load(adev);
2814 	if (ret) {
2815 		dev_err(adev->dev, "PSP load RL failed!\n");
2816 		goto failed1;
2817 	}
2818 
2819 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2820 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
2821 			ret = psp_xgmi_initialize(psp, false, true);
2822 			/* Warn on XGMI session initialization failure
2823 			 * instead of stopping driver initialization
2824 			 */
2825 			if (ret)
2826 				dev_err(psp->adev->dev,
2827 					"XGMI: Failed to initialize XGMI session\n");
2828 		}
2829 	}
2830 
2831 	if (psp->ta_fw) {
2832 		ret = psp_ras_initialize(psp);
2833 		if (ret)
2834 			dev_err(psp->adev->dev,
2835 				"RAS: Failed to initialize RAS\n");
2836 
2837 		ret = psp_hdcp_initialize(psp);
2838 		if (ret)
2839 			dev_err(psp->adev->dev,
2840 				"HDCP: Failed to initialize HDCP\n");
2841 
2842 		ret = psp_dtm_initialize(psp);
2843 		if (ret)
2844 			dev_err(psp->adev->dev,
2845 				"DTM: Failed to initialize DTM\n");
2846 
2847 		ret = psp_rap_initialize(psp);
2848 		if (ret)
2849 			dev_err(psp->adev->dev,
2850 				"RAP: Failed to initialize RAP\n");
2851 
2852 		ret = psp_securedisplay_initialize(psp);
2853 		if (ret)
2854 			dev_err(psp->adev->dev,
2855 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2856 	}
2857 
2858 	return 0;
2859 
2860 failed1:
2861 	psp_free_shared_bufs(psp);
2862 failed:
2863 	/*
2864 	 * all cleanup jobs (xgmi terminate, ras terminate,
2865 	 * ring destroy, cmd/fence/fw buffers destroy,
2866 	 * psp->cmd destroy) are delayed to psp_hw_fini
2867 	 */
2868 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2869 	return ret;
2870 }
2871 
2872 static int psp_hw_init(void *handle)
2873 {
2874 	int ret;
2875 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2876 
2877 	mutex_lock(&adev->firmware.mutex);
2878 	/*
2879 	 * This sequence is used only once during hw_init; it is not
2880 	 * needed on resume.
2881 	 */
2882 	ret = amdgpu_ucode_init_bo(adev);
2883 	if (ret)
2884 		goto failed;
2885 
2886 	ret = psp_load_fw(adev);
2887 	if (ret) {
2888 		dev_err(adev->dev, "PSP firmware loading failed\n");
2889 		goto failed;
2890 	}
2891 
2892 	mutex_unlock(&adev->firmware.mutex);
2893 	return 0;
2894 
2895 failed:
2896 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2897 	mutex_unlock(&adev->firmware.mutex);
2898 	return -EINVAL;
2899 }
2900 
2901 static int psp_hw_fini(void *handle)
2902 {
2903 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2904 	struct psp_context *psp = &adev->psp;
2905 
2906 	if (psp->ta_fw) {
2907 		psp_ras_terminate(psp);
2908 		psp_securedisplay_terminate(psp);
2909 		psp_rap_terminate(psp);
2910 		psp_dtm_terminate(psp);
2911 		psp_hdcp_terminate(psp);
2912 
2913 		if (adev->gmc.xgmi.num_physical_nodes > 1)
2914 			psp_xgmi_terminate(psp);
2915 	}
2916 
2917 	psp_asd_terminate(psp);
2918 	psp_tmr_terminate(psp);
2919 
2920 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2921 
2922 	return 0;
2923 }
2924 
2925 static int psp_suspend(void *handle)
2926 {
2927 	int ret = 0;
2928 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2929 	struct psp_context *psp = &adev->psp;
2930 
2931 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
2932 	    psp->xgmi_context.context.initialized) {
2933 		ret = psp_xgmi_terminate(psp);
2934 		if (ret) {
2935 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
2936 			goto out;
2937 		}
2938 	}
2939 
2940 	if (psp->ta_fw) {
2941 		ret = psp_ras_terminate(psp);
2942 		if (ret) {
2943 			dev_err(adev->dev, "Failed to terminate ras ta\n");
2944 			goto out;
2945 		}
2946 		ret = psp_hdcp_terminate(psp);
2947 		if (ret) {
2948 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
2949 			goto out;
2950 		}
2951 		ret = psp_dtm_terminate(psp);
2952 		if (ret) {
2953 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
2954 			goto out;
2955 		}
2956 		ret = psp_rap_terminate(psp);
2957 		if (ret) {
2958 			dev_err(adev->dev, "Failed to terminate rap ta\n");
2959 			goto out;
2960 		}
2961 		ret = psp_securedisplay_terminate(psp);
2962 		if (ret) {
2963 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
2964 			goto out;
2965 		}
2966 	}
2967 
2968 	ret = psp_asd_terminate(psp);
2969 	if (ret) {
2970 		dev_err(adev->dev, "Failed to terminate asd\n");
2971 		goto out;
2972 	}
2973 
2974 	ret = psp_tmr_terminate(psp);
2975 	if (ret) {
2976 		dev_err(adev->dev, "Failed to terminate tmr\n");
2977 		goto out;
2978 	}
2979 
2980 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2981 	if (ret)
2982 		dev_err(adev->dev, "PSP ring stop failed\n");
2983 
2984 out:
2985 	return ret;
2986 }
2987 
2988 static int psp_resume(void *handle)
2989 {
2990 	int ret;
2991 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2992 	struct psp_context *psp = &adev->psp;
2993 
2994 	dev_info(adev->dev, "PSP is resuming...\n");
2995 
2996 	if (psp->mem_train_ctx.enable_mem_training) {
2997 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2998 		if (ret) {
2999 			dev_err(adev->dev, "Failed to process memory training!\n");
3000 			return ret;
3001 		}
3002 	}
3003 
3004 	mutex_lock(&adev->firmware.mutex);
3005 
3006 	ret = psp_hw_start(psp);
3007 	if (ret)
3008 		goto failed;
3009 
3010 	ret = psp_load_non_psp_fw(psp);
3011 	if (ret)
3012 		goto failed;
3013 
3014 	ret = psp_asd_initialize(psp);
3015 	if (ret) {
3016 		dev_err(adev->dev, "PSP load asd failed!\n");
3017 		goto failed;
3018 	}
3019 
3020 	ret = psp_rl_load(adev);
3021 	if (ret) {
3022 		dev_err(adev->dev, "PSP load RL failed!\n");
3023 		goto failed;
3024 	}
3025 
3026 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3027 		ret = psp_xgmi_initialize(psp, false, true);
3028 		/* Warn on XGMI session initialization failure
3029 		 * instead of stopping driver initialization
3030 		 */
3031 		if (ret)
3032 			dev_err(psp->adev->dev,
3033 				"XGMI: Failed to initialize XGMI session\n");
3034 	}
3035 
3036 	if (psp->ta_fw) {
3037 		ret = psp_ras_initialize(psp);
3038 		if (ret)
3039 			dev_err(psp->adev->dev,
3040 				"RAS: Failed to initialize RAS\n");
3041 
3042 		ret = psp_hdcp_initialize(psp);
3043 		if (ret)
3044 			dev_err(psp->adev->dev,
3045 				"HDCP: Failed to initialize HDCP\n");
3046 
3047 		ret = psp_dtm_initialize(psp);
3048 		if (ret)
3049 			dev_err(psp->adev->dev,
3050 				"DTM: Failed to initialize DTM\n");
3051 
3052 		ret = psp_rap_initialize(psp);
3053 		if (ret)
3054 			dev_err(psp->adev->dev,
3055 				"RAP: Failed to initialize RAP\n");
3056 
3057 		ret = psp_securedisplay_initialize(psp);
3058 		if (ret)
3059 			dev_err(psp->adev->dev,
3060 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3061 	}
3062 
3063 	mutex_unlock(&adev->firmware.mutex);
3064 
3065 	return 0;
3066 
3067 failed:
3068 	dev_err(adev->dev, "PSP resume failed\n");
3069 	mutex_unlock(&adev->firmware.mutex);
3070 	return ret;
3071 }
3072 
3073 int psp_gpu_reset(struct amdgpu_device *adev)
3074 {
3075 	int ret;
3076 
3077 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3078 		return 0;
3079 
3080 	mutex_lock(&adev->psp.mutex);
3081 	ret = psp_mode1_reset(&adev->psp);
3082 	mutex_unlock(&adev->psp.mutex);
3083 
3084 	return ret;
3085 }
3086 
3087 int psp_rlc_autoload_start(struct psp_context *psp)
3088 {
3089 	int ret;
3090 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3091 
3092 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3093 
3094 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3095 				 psp->fence_buf_mc_addr);
3096 
3097 	release_psp_cmd_buf(psp);
3098 
3099 	return ret;
3100 }
3101 
3102 int psp_ring_cmd_submit(struct psp_context *psp,
3103 			uint64_t cmd_buf_mc_addr,
3104 			uint64_t fence_mc_addr,
3105 			int index)
3106 {
3107 	unsigned int psp_write_ptr_reg = 0;
3108 	struct psp_gfx_rb_frame *write_frame;
3109 	struct psp_ring *ring = &psp->km_ring;
3110 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3111 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3112 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3113 	struct amdgpu_device *adev = psp->adev;
3114 	uint32_t ring_size_dw = ring->ring_size / 4;
3115 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3116 
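	/* The ring holds ring_size / sizeof(psp_gfx_rb_frame) frames; the
	 * hardware write pointer counts DWORDs, so frame offsets and the
	 * wrap-around below are computed in DWORD units as well.
	 */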
3117 	/* KM (GPCOM) prepare write pointer */
3118 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3119 
3120 	/* Update KM RB frame pointer to new frame */
3121 	/* write_frame ptr increments by size of rb_frame in bytes */
3122 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3123 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3124 		write_frame = ring_buffer_start;
3125 	else
3126 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3127 	/* Check for an invalid write_frame ptr address */
3128 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3129 		dev_err(adev->dev,
3130 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3131 			ring_buffer_start, ring_buffer_end, write_frame);
3132 		dev_err(adev->dev,
3133 			"write_frame is pointing to address out of bounds\n");
3134 		return -EINVAL;
3135 	}
3136 
3137 	/* Initialize KM RB frame */
3138 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3139 
3140 	/* Update KM RB frame */
3141 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3142 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3143 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3144 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3145 	write_frame->fence_value = index;
3146 	amdgpu_device_flush_hdp(adev, NULL);
3147 
3148 	/* Update the write Pointer in DWORDs */
3149 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3150 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3151 	return 0;
3152 }
3153 
3154 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3155 {
3156 	struct amdgpu_device *adev = psp->adev;
3157 	char fw_name[PSP_FW_NAME_LEN];
3158 	const struct psp_firmware_header_v1_0 *asd_hdr;
3159 	int err = 0;
3160 
3161 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
3162 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
3163 	if (err)
3164 		goto out;
3165 
3166 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3167 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3168 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3169 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3170 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3171 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3172 	return 0;
3173 out:
3174 	amdgpu_ucode_release(&adev->psp.asd_fw);
3175 	return err;
3176 }
3177 
3178 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3179 {
3180 	struct amdgpu_device *adev = psp->adev;
3181 	char fw_name[PSP_FW_NAME_LEN];
3182 	const struct psp_firmware_header_v1_0 *toc_hdr;
3183 	int err = 0;
3184 
3185 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
3186 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
3187 	if (err)
3188 		goto out;
3189 
3190 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3191 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3192 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3193 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3194 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3195 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3196 	return 0;
3197 out:
3198 	amdgpu_ucode_release(&adev->psp.toc_fw);
3199 	return err;
3200 }
3201 
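/* A v2 SOS image packs several components, each described by a
 * psp_fw_bin_desc; copy each descriptor's version, size and start address
 * into the matching component descriptor in the psp context.
 */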
3202 static int parse_sos_bin_descriptor(struct psp_context *psp,
3203 				   const struct psp_fw_bin_desc *desc,
3204 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3205 {
3206 	uint8_t *ucode_start_addr  = NULL;
3207 
3208 	if (!psp || !desc || !sos_hdr)
3209 		return -EINVAL;
3210 
3211 	ucode_start_addr  = (uint8_t *)sos_hdr +
3212 			    le32_to_cpu(desc->offset_bytes) +
3213 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3214 
3215 	switch (desc->fw_type) {
3216 	case PSP_FW_TYPE_PSP_SOS:
3217 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3218 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3219 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3220 		psp->sos.start_addr	   = ucode_start_addr;
3221 		break;
3222 	case PSP_FW_TYPE_PSP_SYS_DRV:
3223 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3224 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3225 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3226 		psp->sys.start_addr        = ucode_start_addr;
3227 		break;
3228 	case PSP_FW_TYPE_PSP_KDB:
3229 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3230 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3231 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3232 		psp->kdb.start_addr        = ucode_start_addr;
3233 		break;
3234 	case PSP_FW_TYPE_PSP_TOC:
3235 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3236 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3237 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3238 		psp->toc.start_addr        = ucode_start_addr;
3239 		break;
3240 	case PSP_FW_TYPE_PSP_SPL:
3241 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3242 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3243 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3244 		psp->spl.start_addr        = ucode_start_addr;
3245 		break;
3246 	case PSP_FW_TYPE_PSP_RL:
3247 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3248 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3249 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3250 		psp->rl.start_addr         = ucode_start_addr;
3251 		break;
3252 	case PSP_FW_TYPE_PSP_SOC_DRV:
3253 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3254 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3255 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3256 		psp->soc_drv.start_addr         = ucode_start_addr;
3257 		break;
3258 	case PSP_FW_TYPE_PSP_INTF_DRV:
3259 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3260 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3261 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3262 		psp->intf_drv.start_addr        = ucode_start_addr;
3263 		break;
3264 	case PSP_FW_TYPE_PSP_DBG_DRV:
3265 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3266 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3267 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3268 		psp->dbg_drv.start_addr         = ucode_start_addr;
3269 		break;
3270 	case PSP_FW_TYPE_PSP_RAS_DRV:
3271 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3272 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3273 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3274 		psp->ras_drv.start_addr         = ucode_start_addr;
3275 		break;
3276 	default:
3277 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3278 		break;
3279 	}
3280 
3281 	return 0;
3282 }
3283 
3284 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3285 {
3286 	const struct psp_firmware_header_v1_0 *sos_hdr;
3287 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3288 	uint8_t *ucode_array_start_addr;
3289 
3290 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3291 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3292 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3293 
3294 	if (adev->gmc.xgmi.connected_to_cpu ||
3295 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3296 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3297 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3298 
3299 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3300 		adev->psp.sys.start_addr = ucode_array_start_addr;
3301 
3302 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3303 		adev->psp.sos.start_addr = ucode_array_start_addr +
3304 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3305 	} else {
3306 		/* Load alternate PSP SOS FW */
3307 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3308 
3309 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3310 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3311 
3312 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3313 		adev->psp.sys.start_addr = ucode_array_start_addr +
3314 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3315 
3316 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3317 		adev->psp.sos.start_addr = ucode_array_start_addr +
3318 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3319 	}
3320 
3321 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3322 		dev_warn(adev->dev, "PSP SOS FW not available");
3323 		return -EINVAL;
3324 	}
3325 
3326 	return 0;
3327 }
3328 
3329 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3330 {
3331 	struct amdgpu_device *adev = psp->adev;
3332 	char fw_name[PSP_FW_NAME_LEN];
3333 	const struct psp_firmware_header_v1_0 *sos_hdr;
3334 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3335 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3336 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3337 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3338 	int err = 0;
3339 	uint8_t *ucode_array_start_addr;
3340 	int fw_index = 0;
3341 
3342 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
3343 	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
3344 	if (err)
3345 		goto out;
3346 
3347 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3348 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3349 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3350 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3351 
3352 	switch (sos_hdr->header.header_version_major) {
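	/* v1 headers use a fixed layout, with minor versions adding TOC,
	 * KDB, SPL and RL sections; v2 headers carry an array of packed
	 * binary descriptors instead.
	 */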
3353 	case 1:
3354 		err = psp_init_sos_base_fw(adev);
3355 		if (err)
3356 			goto out;
3357 
3358 		if (sos_hdr->header.header_version_minor == 1) {
3359 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3360 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3361 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3362 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3363 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3364 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3365 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3366 		}
3367 		if (sos_hdr->header.header_version_minor == 2) {
3368 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3369 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3370 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3371 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3372 		}
3373 		if (sos_hdr->header.header_version_minor == 3) {
3374 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3375 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3376 			adev->psp.toc.start_addr = ucode_array_start_addr +
3377 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3378 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3379 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3380 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3381 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3382 			adev->psp.spl.start_addr = ucode_array_start_addr +
3383 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3384 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3385 			adev->psp.rl.start_addr = ucode_array_start_addr +
3386 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3387 		}
3388 		break;
3389 	case 2:
3390 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3391 
3392 		if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3393 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3394 			err = -EINVAL;
3395 			goto out;
3396 		}
3397 
3398 		for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3399 			err = parse_sos_bin_descriptor(psp,
3400 						       &sos_hdr_v2_0->psp_fw_bin[fw_index],
3401 						       sos_hdr_v2_0);
3402 			if (err)
3403 				goto out;
3404 		}
3405 		break;
3406 	default:
3407 		dev_err(adev->dev,
3408 			"unsupported psp sos firmware\n");
3409 		err = -EINVAL;
3410 		goto out;
3411 	}
3412 
3413 	return 0;
3414 out:
3415 	amdgpu_ucode_release(&adev->psp.sos_fw);
3416 
3417 	return err;
3418 }
3419 
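/* Counterpart of parse_sos_bin_descriptor() for packed TA binaries: copy
 * each descriptor's version, size and start address into the matching TA
 * context.
 */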
3420 static int parse_ta_bin_descriptor(struct psp_context *psp,
3421 				   const struct psp_fw_bin_desc *desc,
3422 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3423 {
3424 	uint8_t *ucode_start_addr  = NULL;
3425 
3426 	if (!psp || !desc || !ta_hdr)
3427 		return -EINVAL;
3428 
3429 	ucode_start_addr  = (uint8_t *)ta_hdr +
3430 			    le32_to_cpu(desc->offset_bytes) +
3431 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3432 
3433 	switch (desc->fw_type) {
3434 	case TA_FW_TYPE_PSP_ASD:
3435 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3436 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3437 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3438 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3439 		break;
3440 	case TA_FW_TYPE_PSP_XGMI:
3441 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3442 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3443 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3444 		break;
3445 	case TA_FW_TYPE_PSP_RAS:
3446 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3447 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3448 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3449 		break;
3450 	case TA_FW_TYPE_PSP_HDCP:
3451 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3452 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3453 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3454 		break;
3455 	case TA_FW_TYPE_PSP_DTM:
3456 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3457 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3458 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3459 		break;
3460 	case TA_FW_TYPE_PSP_RAP:
3461 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3462 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3463 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3464 		break;
3465 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3466 		psp->securedisplay_context.context.bin_desc.fw_version =
3467 			le32_to_cpu(desc->fw_version);
3468 		psp->securedisplay_context.context.bin_desc.size_bytes =
3469 			le32_to_cpu(desc->size_bytes);
3470 		psp->securedisplay_context.context.bin_desc.start_addr =
3471 			ucode_start_addr;
3472 		break;
3473 	default:
3474 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3475 		break;
3476 	}
3477 
3478 	return 0;
3479 }
3480 
3481 static int parse_ta_v1_microcode(struct psp_context *psp)
3482 {
3483 	const struct ta_firmware_header_v1_0 *ta_hdr;
3484 	struct amdgpu_device *adev = psp->adev;
3485 
3486 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3487 
3488 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3489 		return -EINVAL;
3490 
3491 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3492 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3493 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3494 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3495 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3496 		(uint8_t *)ta_hdr +
3497 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3498 
3499 	adev->psp.ras_context.context.bin_desc.fw_version =
3500 		le32_to_cpu(ta_hdr->ras.fw_version);
3501 	adev->psp.ras_context.context.bin_desc.size_bytes =
3502 		le32_to_cpu(ta_hdr->ras.size_bytes);
3503 	adev->psp.ras_context.context.bin_desc.start_addr =
3504 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3505 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3506 
3507 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3508 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3509 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3510 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3511 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3512 		(uint8_t *)ta_hdr +
3513 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3514 
3515 	adev->psp.dtm_context.context.bin_desc.fw_version =
3516 		le32_to_cpu(ta_hdr->dtm.fw_version);
3517 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3518 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3519 	adev->psp.dtm_context.context.bin_desc.start_addr =
3520 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3521 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3522 
3523 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3524 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3525 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3526 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3527 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3528 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3529 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3530 
3531 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3532 
3533 	return 0;
3534 }
3535 
3536 static int parse_ta_v2_microcode(struct psp_context *psp)
3537 {
3538 	const struct ta_firmware_header_v2_0 *ta_hdr;
3539 	struct amdgpu_device *adev = psp->adev;
3540 	int err = 0;
3541 	int ta_index = 0;
3542 
3543 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3544 
3545 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3546 		return -EINVAL;
3547 
3548 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3549 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3550 		return -EINVAL;
3551 	}
3552 
3553 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3554 		err = parse_ta_bin_descriptor(psp,
3555 					      &ta_hdr->ta_fw_bin[ta_index],
3556 					      ta_hdr);
3557 		if (err)
3558 			return err;
3559 	}
3560 
3561 	return 0;
3562 }
3563 
3564 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3565 {
3566 	const struct common_firmware_header *hdr;
3567 	struct amdgpu_device *adev = psp->adev;
3568 	char fw_name[PSP_FW_NAME_LEN];
3569 	int err;
3570 
3571 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
3572 	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
3573 	if (err)
3574 		return err;
3575 
3576 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3577 	switch (le16_to_cpu(hdr->header_version_major)) {
3578 	case 1:
3579 		err = parse_ta_v1_microcode(psp);
3580 		break;
3581 	case 2:
3582 		err = parse_ta_v2_microcode(psp);
3583 		break;
3584 	default:
3585 		dev_err(adev->dev, "unsupported TA header version\n");
3586 		err = -EINVAL;
3587 	}
3588 
3589 	if (err)
3590 		amdgpu_ucode_release(&adev->psp.ta_fw);
3591 
3592 	return err;
3593 }
3594 
3595 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3596 {
3597 	struct amdgpu_device *adev = psp->adev;
3598 	char fw_name[PSP_FW_NAME_LEN];
3599 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3600 	struct amdgpu_firmware_info *info = NULL;
3601 	int err = 0;
3602 
3603 	if (!amdgpu_sriov_vf(adev)) {
3604 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3605 		return -EINVAL;
3606 	}
3607 
3608 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
3609 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
3610 	if (err) {
3611 		if (err == -ENODEV) {
3612 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3613 			err = 0;
3614 			goto out;
3615 		}
3616 		dev_err(adev->dev, "fail to initialize cap microcode\n");
3617 	}
3618 
3619 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3620 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
3621 	info->fw = adev->psp.cap_fw;
3622 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3623 		adev->psp.cap_fw->data;
3624 	adev->firmware.fw_size += ALIGN(
3625 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3626 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3627 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3628 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3629 
3630 	return 0;
3631 
3632 out:
3633 	amdgpu_ucode_release(&adev->psp.cap_fw);
3634 	return err;
3635 }
3636 
3637 static int psp_set_clockgating_state(void *handle,
3638 				     enum amd_clockgating_state state)
3639 {
3640 	return 0;
3641 }
3642 
3643 static int psp_set_powergating_state(void *handle,
3644 				     enum amd_powergating_state state)
3645 {
3646 	return 0;
3647 }
3648 
3649 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3650 					 struct device_attribute *attr,
3651 					 char *buf)
3652 {
3653 	struct drm_device *ddev = dev_get_drvdata(dev);
3654 	struct amdgpu_device *adev = drm_to_adev(ddev);
3655 	uint32_t fw_ver;
3656 	int ret;
3657 
3658 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3659 		dev_info(adev->dev, "PSP block is not ready yet\n.");
3660 		return -EBUSY;
3661 	}
3662 
3663 	mutex_lock(&adev->psp.mutex);
3664 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3665 	mutex_unlock(&adev->psp.mutex);
3666 
3667 	if (ret) {
3668 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3669 		return ret;
3670 	}
3671 
3672 	return sysfs_emit(buf, "%x\n", fw_ver);
3673 }
3674 
3675 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3676 					  struct device_attribute *attr,
3677 					  const char *buf,
3678 					  size_t count)
3679 {
3680 	struct drm_device *ddev = dev_get_drvdata(dev);
3681 	struct amdgpu_device *adev = drm_to_adev(ddev);
3682 	int ret, idx;
3683 	char fw_name[100];
3684 	const struct firmware *usbc_pd_fw;
3685 	struct amdgpu_bo *fw_buf_bo = NULL;
3686 	uint64_t fw_pri_mc_addr;
3687 	void *fw_pri_cpu_addr;
3688 
3689 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3690 		dev_err(adev->dev, "PSP block is not ready yet.");
3691 		return -EBUSY;
3692 	}
3693 
3694 	if (!drm_dev_enter(ddev, &idx))
3695 		return -ENODEV;
3696 
3697 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
3698 	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
3699 	if (ret)
3700 		goto fail;
3701 
3702 	/* LFB address which is aligned to 1MB boundary per PSP request */
3703 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3704 				      AMDGPU_GEM_DOMAIN_VRAM |
3705 				      AMDGPU_GEM_DOMAIN_GTT,
3706 				      &fw_buf_bo, &fw_pri_mc_addr,
3707 				      &fw_pri_cpu_addr);
3708 	if (ret)
3709 		goto rel_buf;
3710 
3711 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3712 
3713 	mutex_lock(&adev->psp.mutex);
3714 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3715 	mutex_unlock(&adev->psp.mutex);
3716 
3717 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3718 
3719 rel_buf:
3720 	release_firmware(usbc_pd_fw);
3721 fail:
3722 	if (ret) {
3723 		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
3724 		count = ret;
3725 	}
3726 
3727 	drm_dev_exit(idx);
3728 	return count;
3729 }
3730 
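/*
 * Copy @bin_size bytes of a firmware image from @start_addr into the PSP
 * private firmware buffer (fw_pri_buf, PSP_1_MEG bytes). Callers are
 * expected to keep @bin_size within that limit.
 */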
3731 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3732 {
3733 	int idx;
3734 
3735 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3736 		return;
3737 
3738 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3739 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
3740 
3741 	drm_dev_exit(idx);
3742 }
3743 
3744 /**
3745  * DOC: usbc_pd_fw
3746  * Reading from this file will retrieve the USB-C PD firmware version. Writing the
3747  * name of a firmware file (fetched as "amdgpu/<name>") will trigger the update process.
3748  */
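/*
 * Illustrative usage sketch (not part of the driver): the sysfs path and the
 * firmware file name below are placeholders; the named file must exist in the
 * firmware search path as amdgpu/<name>.
 *
 *   # read back the current USB-C PD firmware version
 *   cat /sys/bus/pci/devices/<bdf>/usbc_pd_fw
 *
 *   # stage amdgpu/usbc_pd_fw.bin and start the update
 *   echo -n usbc_pd_fw.bin > /sys/bus/pci/devices/<bdf>/usbc_pd_fw
 */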
3749 static DEVICE_ATTR(usbc_pd_fw, 0644,
3750 		   psp_usbc_pd_fw_sysfs_read,
3751 		   psp_usbc_pd_fw_sysfs_write);
3752 
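/*
 * A firmware binary descriptor is considered present/valid once its size has
 * been populated from the ucode headers.
 */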
3753 int is_psp_fw_valid(struct psp_bin_desc bin)
3754 {
3755 	return bin.size_bytes;
3756 }
3757 
3758 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3759 					struct bin_attribute *bin_attr,
3760 					char *buffer, loff_t pos, size_t count)
3761 {
3762 	struct device *dev = kobj_to_dev(kobj);
3763 	struct drm_device *ddev = dev_get_drvdata(dev);
3764 	struct amdgpu_device *adev = drm_to_adev(ddev);
3765 
3766 	adev->psp.vbflash_done = false;
3767 
3768 	/* Safeguard against memory drain */
3769 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
3770 		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
3771 		kvfree(adev->psp.vbflash_tmp_buf);
3772 		adev->psp.vbflash_tmp_buf = NULL;
3773 		adev->psp.vbflash_image_size = 0;
3774 		return -ENOMEM;
3775 	}
3776 
3777 	/* TODO Just allocate max for now and optimize to realloc later if needed */
3778 	if (!adev->psp.vbflash_tmp_buf) {
3779 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3780 		if (!adev->psp.vbflash_tmp_buf)
3781 			return -ENOMEM;
3782 	}
3783 
3784 	mutex_lock(&adev->psp.mutex);
3785 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3786 	adev->psp.vbflash_image_size += count;
3787 	mutex_unlock(&adev->psp.mutex);
3788 
3789 	dev_dbg(adev->dev, "IFWI staged for update\n");
3790 
3791 	return count;
3792 }
3793 
3794 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3795 				       struct bin_attribute *bin_attr, char *buffer,
3796 				       loff_t pos, size_t count)
3797 {
3798 	struct device *dev = kobj_to_dev(kobj);
3799 	struct drm_device *ddev = dev_get_drvdata(dev);
3800 	struct amdgpu_device *adev = drm_to_adev(ddev);
3801 	struct amdgpu_bo *fw_buf_bo = NULL;
3802 	uint64_t fw_pri_mc_addr;
3803 	void *fw_pri_cpu_addr;
3804 	int ret;
3805 
3806 	if (adev->psp.vbflash_image_size == 0)
3807 		return -EINVAL;
3808 
3809 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
3810 
3811 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3812 					AMDGPU_GPU_PAGE_SIZE,
3813 					AMDGPU_GEM_DOMAIN_VRAM,
3814 					&fw_buf_bo,
3815 					&fw_pri_mc_addr,
3816 					&fw_pri_cpu_addr);
3817 	if (ret)
3818 		goto rel_buf;
3819 
3820 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3821 
3822 	mutex_lock(&adev->psp.mutex);
3823 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3824 	mutex_unlock(&adev->psp.mutex);
3825 
3826 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3827 
3828 rel_buf:
3829 	kvfree(adev->psp.vbflash_tmp_buf);
3830 	adev->psp.vbflash_tmp_buf = NULL;
3831 	adev->psp.vbflash_image_size = 0;
3832 
3833 	if (ret) {
3834 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
3835 		return ret;
3836 	}
3837 
3838 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
3839 	return 0;
3840 }
3841 
3842 /**
3843  * DOC: psp_vbflash
3844  * Writing to this file will stage an IFWI for update. Reading from this file
3845  * will trigger the update process.
3846  */
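/*
 * Illustrative usage sketch (not part of the driver): the sysfs path and the
 * IFWI image name are placeholders.
 *
 *   # stage the IFWI image (write path)
 *   dd if=ifwi.bin of=/sys/bus/pci/devices/<bdf>/psp_vbflash bs=1M
 *
 *   # kick off the SPI ROM update (read path)
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash
 */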
3847 static struct bin_attribute psp_vbflash_bin_attr = {
3848 	.attr = {.name = "psp_vbflash", .mode = 0660},
3849 	.size = 0,
3850 	.write = amdgpu_psp_vbflash_write,
3851 	.read = amdgpu_psp_vbflash_read,
3852 };
3853 
3854 /**
3855  * DOC: psp_vbflash_status
3856  * The status of the flash process.
3857  * 0: IFWI flash not complete.
3858  * 1: IFWI flash complete.
3859  */
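/*
 * Illustrative usage sketch (path is a placeholder): poll this attribute
 * after triggering the flash; it reads back 1 once the update has completed
 * successfully.
 *
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash_status
 */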
3860 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3861 					 struct device_attribute *attr,
3862 					 char *buf)
3863 {
3864 	struct drm_device *ddev = dev_get_drvdata(dev);
3865 	struct amdgpu_device *adev = drm_to_adev(ddev);
3866 	uint32_t vbflash_status;
3867 
3868 	vbflash_status = psp_vbflash_status(&adev->psp);
3869 	if (!adev->psp.vbflash_done)
3870 		vbflash_status = 0;
3871 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
3872 		vbflash_status = 1;
3873 
3874 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
3875 }
3876 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
3877 
3878 static struct bin_attribute *bin_flash_attrs[] = {
3879 	&psp_vbflash_bin_attr,
3880 	NULL
3881 };
3882 
3883 static struct attribute *flash_attrs[] = {
3884 	&dev_attr_psp_vbflash_status.attr,
3885 	&dev_attr_usbc_pd_fw.attr,
3886 	NULL
3887 };
3888 
3889 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3890 {
3891 	struct device *dev = kobj_to_dev(kobj);
3892 	struct drm_device *ddev = dev_get_drvdata(dev);
3893 	struct amdgpu_device *adev = drm_to_adev(ddev);
3894 
3895 	if (attr == &dev_attr_usbc_pd_fw.attr)
3896 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
3897 
3898 	return adev->psp.sup_ifwi_up ? 0440 : 0;
3899 }
3900 
3901 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
3902 						struct bin_attribute *attr,
3903 						int idx)
3904 {
3905 	struct device *dev = kobj_to_dev(kobj);
3906 	struct drm_device *ddev = dev_get_drvdata(dev);
3907 	struct amdgpu_device *adev = drm_to_adev(ddev);
3908 
3909 	return adev->psp.sup_ifwi_up ? 0660 : 0;
3910 }
3911 
3912 const struct attribute_group amdgpu_flash_attr_group = {
3913 	.attrs = flash_attrs,
3914 	.bin_attrs = bin_flash_attrs,
3915 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
3916 	.is_visible = amdgpu_flash_attr_is_visible,
3917 };
3918 
3919 const struct amd_ip_funcs psp_ip_funcs = {
3920 	.name = "psp",
3921 	.early_init = psp_early_init,
3922 	.late_init = NULL,
3923 	.sw_init = psp_sw_init,
3924 	.sw_fini = psp_sw_fini,
3925 	.hw_init = psp_hw_init,
3926 	.hw_fini = psp_hw_fini,
3927 	.suspend = psp_suspend,
3928 	.resume = psp_resume,
3929 	.is_idle = NULL,
3930 	.check_soft_reset = NULL,
3931 	.wait_for_idle = NULL,
3932 	.soft_reset = NULL,
3933 	.set_clockgating_state = psp_set_clockgating_state,
3934 	.set_powergating_state = psp_set_powergating_state,
3935 };
3936 
3937 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
3938 	.type = AMD_IP_BLOCK_TYPE_PSP,
3939 	.major = 3,
3940 	.minor = 1,
3941 	.rev = 0,
3942 	.funcs = &psp_ip_funcs,
3943 };
3944 
3945 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
3946 	.type = AMD_IP_BLOCK_TYPE_PSP,
3947 	.major = 10,
3948 	.minor = 0,
3949 	.rev = 0,
3950 	.funcs = &psp_ip_funcs,
3951 };
3952 
3953 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
3954 	.type = AMD_IP_BLOCK_TYPE_PSP,
3955 	.major = 11,
3956 	.minor = 0,
3957 	.rev = 0,
3958 	.funcs = &psp_ip_funcs,
3959 };
3960 
3961 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
3962 	.type = AMD_IP_BLOCK_TYPE_PSP,
3963 	.major = 11,
3964 	.minor = 0,
3965 	.rev = 8,
3966 	.funcs = &psp_ip_funcs,
3967 };
3968 
3969 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
3970 	.type = AMD_IP_BLOCK_TYPE_PSP,
3971 	.major = 12,
3972 	.minor = 0,
3973 	.rev = 0,
3974 	.funcs = &psp_ip_funcs,
3975 };
3976 
3977 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
3978 	.type = AMD_IP_BLOCK_TYPE_PSP,
3979 	.major = 13,
3980 	.minor = 0,
3981 	.rev = 0,
3982 	.funcs = &psp_ip_funcs,
3983 };
3984 
3985 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
3986 	.type = AMD_IP_BLOCK_TYPE_PSP,
3987 	.major = 13,
3988 	.minor = 0,
3989 	.rev = 4,
3990 	.funcs = &psp_ip_funcs,
3991 };
3992 
3993 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
3994 	.type = AMD_IP_BLOCK_TYPE_PSP,
3995 	.major = 14,
3996 	.minor = 0,
3997 	.rev = 0,
3998 	.funcs = &psp_ip_funcs,
3999 };
4000