xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (revision d19deabe5a4566851f6ecade5ebd2e63c3248cf2)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42 
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46 
47 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*3)
48 
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52 
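/* Create the kernel-mode (KM) ring used by the driver to submit commands to the PSP */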
53 static int psp_ring_init(struct psp_context *psp,
54 			 enum psp_ring_type ring_type)
55 {
56 	int ret = 0;
57 	struct psp_ring *ring;
58 	struct amdgpu_device *adev = psp->adev;
59 
60 	ring = &psp->km_ring;
61 
62 	ring->ring_type = ring_type;
63 
64 	/* allocate 4k Page of Local Frame Buffer memory for ring */
65 	ring->ring_size = 0x1000;
66 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 				      AMDGPU_GEM_DOMAIN_VRAM |
68 				      AMDGPU_GEM_DOMAIN_GTT,
69 				      &adev->firmware.rbuf,
70 				      &ring->ring_mem_mc_addr,
71 				      (void **)&ring->ring_mem);
72 	if (ret) {
73 		ring->ring_size = 0;
74 		return ret;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * Because DF Cstate management is centralized in the PMFW, the firmware
82  * loading sequence is updated as below:
83  *   - Load KDB
84  *   - Load SYS_DRV
85  *   - Load tOS
86  *   - Load PMFW
87  *   - Setup TMR
88  *   - Load other non-psp fw
89  *   - Load ASD
90  *   - Load XGMI/RAS/HDCP/DTM TA if any
91  *
92  * This new sequence is required for
93  *   - Arcturus and onwards
94  */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 	struct amdgpu_device *adev = psp->adev;
98 
99 	if (amdgpu_sriov_vf(adev)) {
100 		psp->pmfw_centralized_cstate_management = false;
101 		return;
102 	}
103 
104 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 	case IP_VERSION(11, 0, 0):
106 	case IP_VERSION(11, 0, 4):
107 	case IP_VERSION(11, 0, 5):
108 	case IP_VERSION(11, 0, 7):
109 	case IP_VERSION(11, 0, 9):
110 	case IP_VERSION(11, 0, 11):
111 	case IP_VERSION(11, 0, 12):
112 	case IP_VERSION(11, 0, 13):
113 	case IP_VERSION(13, 0, 0):
114 	case IP_VERSION(13, 0, 2):
115 	case IP_VERSION(13, 0, 7):
116 		psp->pmfw_centralized_cstate_management = true;
117 		break;
118 	default:
119 		psp->pmfw_centralized_cstate_management = false;
120 		break;
121 	}
122 }
123 
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 	struct amdgpu_device *adev = psp->adev;
127 	char ucode_prefix[30];
128 	int ret = 0;
129 
130 	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131 
132 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 	case IP_VERSION(9, 0, 0):
134 	case IP_VERSION(11, 0, 7):
135 	case IP_VERSION(11, 0, 9):
136 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 		break;
139 	case IP_VERSION(13, 0, 2):
140 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 		ret = psp_init_cap_microcode(psp, ucode_prefix);
142 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 		break;
144 	case IP_VERSION(13, 0, 0):
145 		adev->virt.autoload_ucode_id = 0;
146 		break;
147 	case IP_VERSION(13, 0, 6):
148 	case IP_VERSION(13, 0, 14):
149 		ret = psp_init_cap_microcode(psp, ucode_prefix);
150 		ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 		break;
152 	case IP_VERSION(13, 0, 10):
153 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 		ret = psp_init_cap_microcode(psp, ucode_prefix);
155 		break;
156 	default:
157 		return -EINVAL;
158 	}
159 	return ret;
160 }
161 
162 static int psp_early_init(void *handle)
163 {
164 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
165 	struct psp_context *psp = &adev->psp;
166 
167 	psp->autoload_supported = true;
168 	psp->boot_time_tmr = true;
169 
170 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
171 	case IP_VERSION(9, 0, 0):
172 		psp_v3_1_set_psp_funcs(psp);
173 		psp->autoload_supported = false;
174 		psp->boot_time_tmr = false;
175 		break;
176 	case IP_VERSION(10, 0, 0):
177 	case IP_VERSION(10, 0, 1):
178 		psp_v10_0_set_psp_funcs(psp);
179 		psp->autoload_supported = false;
180 		psp->boot_time_tmr = false;
181 		break;
182 	case IP_VERSION(11, 0, 2):
183 	case IP_VERSION(11, 0, 4):
184 		psp_v11_0_set_psp_funcs(psp);
185 		psp->autoload_supported = false;
186 		psp->boot_time_tmr = false;
187 		break;
188 	case IP_VERSION(11, 0, 0):
189 	case IP_VERSION(11, 0, 7):
190 		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
191 		fallthrough;
192 	case IP_VERSION(11, 0, 5):
193 	case IP_VERSION(11, 0, 9):
194 	case IP_VERSION(11, 0, 11):
195 	case IP_VERSION(11, 5, 0):
196 	case IP_VERSION(11, 0, 12):
197 	case IP_VERSION(11, 0, 13):
198 		psp_v11_0_set_psp_funcs(psp);
199 		psp->boot_time_tmr = false;
200 		break;
201 	case IP_VERSION(11, 0, 3):
202 	case IP_VERSION(12, 0, 1):
203 		psp_v12_0_set_psp_funcs(psp);
204 		psp->autoload_supported = false;
205 		psp->boot_time_tmr = false;
206 		break;
207 	case IP_VERSION(13, 0, 2):
208 		psp->boot_time_tmr = false;
209 		fallthrough;
210 	case IP_VERSION(13, 0, 6):
211 	case IP_VERSION(13, 0, 14):
212 		psp_v13_0_set_psp_funcs(psp);
213 		psp->autoload_supported = false;
214 		break;
215 	case IP_VERSION(13, 0, 1):
216 	case IP_VERSION(13, 0, 3):
217 	case IP_VERSION(13, 0, 5):
218 	case IP_VERSION(13, 0, 8):
219 	case IP_VERSION(13, 0, 11):
220 	case IP_VERSION(14, 0, 0):
221 	case IP_VERSION(14, 0, 1):
222 		psp_v13_0_set_psp_funcs(psp);
223 		psp->boot_time_tmr = false;
224 		break;
225 	case IP_VERSION(11, 0, 8):
226 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
227 			psp_v11_0_8_set_psp_funcs(psp);
228 		}
229 		psp->autoload_supported = false;
230 		psp->boot_time_tmr = false;
231 		break;
232 	case IP_VERSION(13, 0, 0):
233 	case IP_VERSION(13, 0, 7):
234 	case IP_VERSION(13, 0, 10):
235 		psp_v13_0_set_psp_funcs(psp);
236 		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
237 		psp->boot_time_tmr = false;
238 		break;
239 	case IP_VERSION(13, 0, 4):
240 		psp_v13_0_4_set_psp_funcs(psp);
241 		psp->boot_time_tmr = false;
242 		break;
243 	case IP_VERSION(14, 0, 2):
244 	case IP_VERSION(14, 0, 3):
245 		psp_v14_0_set_psp_funcs(psp);
246 		break;
247 	default:
248 		return -EINVAL;
249 	}
250 
251 	psp->adev = adev;
252 
253 	adev->psp_timeout = 20000;
254 
255 	psp_check_pmfw_centralized_cstate_management(psp);
256 
257 	if (amdgpu_sriov_vf(adev))
258 		return psp_init_sriov_microcode(psp);
259 	else
260 		return psp_init_microcode(psp);
261 }
262 
263 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
264 {
265 	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
266 			      &mem_ctx->shared_buf);
267 	mem_ctx->shared_bo = NULL;
268 }
269 
270 static void psp_free_shared_bufs(struct psp_context *psp)
271 {
272 	void *tmr_buf;
273 	void **pptr;
274 
275 	/* free TMR memory buffer */
276 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
277 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
278 	psp->tmr_bo = NULL;
279 
280 	/* free xgmi shared memory */
281 	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
282 
283 	/* free ras shared memory */
284 	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
285 
286 	/* free hdcp shared memory */
287 	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
288 
289 	/* free dtm shared memory */
290 	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
291 
292 	/* free rap shared memory */
293 	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
294 
295 	/* free securedisplay shared memory */
296 	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
299 }
300 
301 static void psp_memory_training_fini(struct psp_context *psp)
302 {
303 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
304 
305 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
306 	kfree(ctx->sys_cache);
307 	ctx->sys_cache = NULL;
308 }
309 
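/*
 * Allocate a system-memory cache for the two-stage DRAM training data; only
 * done when the training region was reserved successfully at boot.
 */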
310 static int psp_memory_training_init(struct psp_context *psp)
311 {
312 	int ret;
313 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
314 
315 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
316 		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
317 		return 0;
318 	}
319 
320 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
321 	if (ctx->sys_cache == NULL) {
322 		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
323 		ret = -ENOMEM;
324 		goto Err_out;
325 	}
326 
327 	dev_dbg(psp->adev->dev,
328 		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
329 		ctx->train_data_size,
330 		ctx->p2c_train_data_offset,
331 		ctx->c2p_train_data_offset);
332 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
333 	return 0;
334 
335 Err_out:
336 	psp_memory_training_fini(psp);
337 	return ret;
338 }
339 
340 /*
341  * Helper function to query a PSP runtime database entry
342  *
343  * @adev: amdgpu_device pointer
344  * @entry_type: the type of psp runtime database entry
345  * @db_entry: runtime database entry pointer
346  *
347  * Return false if the runtime database doesn't exist or the entry is invalid,
348  * or true if the specific database entry is found and copied to @db_entry
349  */
350 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
351 				     enum psp_runtime_entry_type entry_type,
352 				     void *db_entry)
353 {
354 	uint64_t db_header_pos, db_dir_pos;
355 	struct psp_runtime_data_header db_header = {0};
356 	struct psp_runtime_data_directory db_dir = {0};
357 	bool ret = false;
358 	int i;
359 
360 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
361 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
362 		return false;
363 
364 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
365 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
366 
367 	/* read runtime db header from vram */
368 	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
369 			sizeof(struct psp_runtime_data_header), false);
370 
371 	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
372 		/* runtime db doesn't exist, exit */
373 		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
374 		return false;
375 	}
376 
377 	/* read runtime database entry from vram */
378 	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
379 			sizeof(struct psp_runtime_data_directory), false);
380 
381 	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
382 		/* invalid db entry count, exit */
383 		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
384 		return false;
385 	}
386 
387 	/* look up for requested entry type */
388 	for (i = 0; i < db_dir.entry_count && !ret; i++) {
389 		if (db_dir.entry_list[i].entry_type == entry_type) {
390 			switch (entry_type) {
391 			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
392 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
393 					/* invalid db entry size */
394 					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
395 					return false;
396 				}
397 				/* read runtime database entry */
398 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
399 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
400 				ret = true;
401 				break;
402 			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
403 				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
404 					/* invalid db entry size */
405 					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
406 					return false;
407 				}
408 				/* read runtime database entry */
409 				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
410 							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
411 				ret = true;
412 				break;
413 			default:
414 				ret = false;
415 				break;
416 			}
417 		}
418 	}
419 
420 	return ret;
421 }
422 
423 static int psp_sw_init(void *handle)
424 {
425 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
426 	struct psp_context *psp = &adev->psp;
427 	int ret;
428 	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
429 	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
430 	struct psp_runtime_scpm_entry scpm_entry;
431 
432 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
433 	if (!psp->cmd) {
434 		dev_err(adev->dev, "Failed to allocate memory for the command buffer!\n");
435 		return -ENOMEM;
436 	}
437 
438 	adev->psp.xgmi_context.supports_extended_data =
439 		!adev->gmc.xgmi.connected_to_cpu &&
440 		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
441 
442 	memset(&scpm_entry, 0, sizeof(scpm_entry));
443 	if ((psp_get_runtime_db_entry(adev,
444 				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
445 				&scpm_entry)) &&
446 	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
447 		adev->scpm_enabled = true;
448 		adev->scpm_status = scpm_entry.scpm_status;
449 	} else {
450 		adev->scpm_enabled = false;
451 		adev->scpm_status = SCPM_DISABLE;
452 	}
453 
454 	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
455 
456 	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
457 	if (psp_get_runtime_db_entry(adev,
458 				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
459 				&boot_cfg_entry)) {
460 		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
461 		if ((psp->boot_cfg_bitmask) &
462 		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
463 			/* If psp runtime database exists, then
464 			 * only enable two stage memory training
465 			 * when TWO_STAGE_DRAM_TRAINING bit is set
466 			 * in runtime database
467 			 */
468 			mem_training_ctx->enable_mem_training = true;
469 		}
470 
471 	} else {
472 		/* If psp runtime database doesn't exist or is
473 		 * invalid, force enable two stage memory training
474 		 */
475 		mem_training_ctx->enable_mem_training = true;
476 	}
477 
478 	if (mem_training_ctx->enable_mem_training) {
479 		ret = psp_memory_training_init(psp);
480 		if (ret) {
481 			dev_err(adev->dev, "Failed to initialize memory training!\n");
482 			return ret;
483 		}
484 
485 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
486 		if (ret) {
487 			dev_err(adev->dev, "Failed to process memory training!\n");
488 			return ret;
489 		}
490 	}
491 
492 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
493 				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
494 				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
495 				      &psp->fw_pri_bo,
496 				      &psp->fw_pri_mc_addr,
497 				      &psp->fw_pri_buf);
498 	if (ret)
499 		return ret;
500 
501 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
502 				      AMDGPU_GEM_DOMAIN_VRAM |
503 				      AMDGPU_GEM_DOMAIN_GTT,
504 				      &psp->fence_buf_bo,
505 				      &psp->fence_buf_mc_addr,
506 				      &psp->fence_buf);
507 	if (ret)
508 		goto failed1;
509 
510 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
511 				      AMDGPU_GEM_DOMAIN_VRAM |
512 				      AMDGPU_GEM_DOMAIN_GTT,
513 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
514 				      (void **)&psp->cmd_buf_mem);
515 	if (ret)
516 		goto failed2;
517 
518 	return 0;
519 
520 failed2:
521 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
522 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
523 failed1:
524 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
525 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
526 	return ret;
527 }
528 
529 static int psp_sw_fini(void *handle)
530 {
531 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
532 	struct psp_context *psp = &adev->psp;
533 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
534 
535 	psp_memory_training_fini(psp);
536 
537 	amdgpu_ucode_release(&psp->sos_fw);
538 	amdgpu_ucode_release(&psp->asd_fw);
539 	amdgpu_ucode_release(&psp->ta_fw);
540 	amdgpu_ucode_release(&psp->cap_fw);
541 	amdgpu_ucode_release(&psp->toc_fw);
542 
543 	kfree(cmd);
544 	cmd = NULL;
545 
546 	psp_free_shared_bufs(psp);
547 
548 	if (psp->km_ring.ring_mem)
549 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
550 				      &psp->km_ring.ring_mem_mc_addr,
551 				      (void **)&psp->km_ring.ring_mem);
552 
553 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
554 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
555 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
556 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
557 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
558 			      (void **)&psp->cmd_buf_mem);
559 
560 	return 0;
561 }
562 
563 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
564 		 uint32_t reg_val, uint32_t mask, bool check_changed)
565 {
566 	uint32_t val;
567 	int i;
568 	struct amdgpu_device *adev = psp->adev;
569 
570 	if (psp->adev->no_hw_access)
571 		return 0;
572 
573 	for (i = 0; i < adev->usec_timeout; i++) {
574 		val = RREG32(reg_index);
575 		if (check_changed) {
576 			if (val != reg_val)
577 				return 0;
578 		} else {
579 			if ((val & mask) == reg_val)
580 				return 0;
581 		}
582 		udelay(1);
583 	}
584 
585 	return -ETIME;
586 }
587 
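/* Poll a register in 1 ms steps; intended for long-running SPIROM update operations */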
588 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
589 			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
590 {
591 	uint32_t val;
592 	int i;
593 	struct amdgpu_device *adev = psp->adev;
594 
595 	if (psp->adev->no_hw_access)
596 		return 0;
597 
598 	for (i = 0; i < msec_timeout; i++) {
599 		val = RREG32(reg_index);
600 		if ((val & mask) == reg_val)
601 			return 0;
602 		msleep(1);
603 	}
604 
605 	return -ETIME;
606 }
607 
608 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
609 {
610 	switch (cmd_id) {
611 	case GFX_CMD_ID_LOAD_TA:
612 		return "LOAD_TA";
613 	case GFX_CMD_ID_UNLOAD_TA:
614 		return "UNLOAD_TA";
615 	case GFX_CMD_ID_INVOKE_CMD:
616 		return "INVOKE_CMD";
617 	case GFX_CMD_ID_LOAD_ASD:
618 		return "LOAD_ASD";
619 	case GFX_CMD_ID_SETUP_TMR:
620 		return "SETUP_TMR";
621 	case GFX_CMD_ID_LOAD_IP_FW:
622 		return "LOAD_IP_FW";
623 	case GFX_CMD_ID_DESTROY_TMR:
624 		return "DESTROY_TMR";
625 	case GFX_CMD_ID_SAVE_RESTORE:
626 		return "SAVE_RESTORE_IP_FW";
627 	case GFX_CMD_ID_SETUP_VMR:
628 		return "SETUP_VMR";
629 	case GFX_CMD_ID_DESTROY_VMR:
630 		return "DESTROY_VMR";
631 	case GFX_CMD_ID_PROG_REG:
632 		return "PROG_REG";
633 	case GFX_CMD_ID_GET_FW_ATTESTATION:
634 		return "GET_FW_ATTESTATION";
635 	case GFX_CMD_ID_LOAD_TOC:
636 		return "ID_LOAD_TOC";
637 	case GFX_CMD_ID_AUTOLOAD_RLC:
638 		return "AUTOLOAD_RLC";
639 	case GFX_CMD_ID_BOOT_CFG:
640 		return "BOOT_CFG";
641 	default:
642 		return "UNKNOWN CMD";
643 	}
644 }
645 
646 static bool psp_err_warn(struct psp_context *psp)
647 {
648 	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
649 
650 	/* This response indicates reg list is already loaded */
651 	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
652 	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
653 	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
654 	    cmd->resp.status == TEE_ERROR_CANCEL)
655 		return false;
656 
657 	return true;
658 }
659 
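/*
 * Copy @cmd into the shared command buffer, submit it on the KM ring and wait
 * on the fence buffer for completion; the PSP response is copied back into
 * @cmd and, for firmware loads, the TMR destination address into @ucode.
 */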
660 static int
661 psp_cmd_submit_buf(struct psp_context *psp,
662 		   struct amdgpu_firmware_info *ucode,
663 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
664 {
665 	int ret;
666 	int index;
667 	int timeout = psp->adev->psp_timeout;
668 	bool ras_intr = false;
669 	bool skip_unsupport = false;
670 
671 	if (psp->adev->no_hw_access)
672 		return 0;
673 
674 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
675 
676 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
677 
678 	index = atomic_inc_return(&psp->fence_value);
679 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
680 	if (ret) {
681 		atomic_dec(&psp->fence_value);
682 		goto exit;
683 	}
684 
685 	amdgpu_device_invalidate_hdp(psp->adev, NULL);
686 	while (*((unsigned int *)psp->fence_buf) != index) {
687 		if (--timeout == 0)
688 			break;
689 		/*
690 		 * Don't wait for the timeout when err_event_athub occurs, because
691 		 * the GPU reset thread has been triggered and the locked resources
692 		 * should be released for the PSP resume sequence.
693 		 */
694 		ras_intr = amdgpu_ras_intr_triggered();
695 		if (ras_intr)
696 			break;
697 		usleep_range(10, 100);
698 		amdgpu_device_invalidate_hdp(psp->adev, NULL);
699 	}
700 
701 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
702 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
703 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
704 
705 	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
706 
707 	/* In some cases the PSP response status is not 0 even though there is
708 	 * no problem with the submitted command. Some versions of the PSP FW
709 	 * don't write 0 to that field.
710 	 * So here we only print a warning instead of an error during PSP
711 	 * initialization, to avoid breaking hw_init, and we don't return
712 	 * -EINVAL.
713 	 */
714 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
715 		if (ucode)
716 			dev_warn(psp->adev->dev,
717 				 "failed to load ucode %s(0x%X) ",
718 				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
719 		if (psp_err_warn(psp))
720 			dev_warn(
721 				psp->adev->dev,
722 				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
723 				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
724 				psp->cmd_buf_mem->cmd_id,
725 				psp->cmd_buf_mem->resp.status);
726 		/* If any firmware (including CAP) load fails under SRIOV, it should
727 		 * return failure to stop the VF from initializing.
728 		 * Also return failure in case of timeout
729 		 */
730 		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
731 			ret = -EINVAL;
732 			goto exit;
733 		}
734 	}
735 
736 	if (ucode) {
737 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
738 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
739 	}
740 
741 exit:
742 	return ret;
743 }
744 
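/*
 * Take psp->mutex and return the zeroed shared command buffer; every caller
 * must pair this with release_psp_cmd_buf().
 */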
745 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
746 {
747 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
748 
749 	mutex_lock(&psp->mutex);
750 
751 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
752 
753 	return cmd;
754 }
755 
756 static void release_psp_cmd_buf(struct psp_context *psp)
757 {
758 	mutex_unlock(&psp->mutex);
759 }
760 
761 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
762 				 struct psp_gfx_cmd_resp *cmd,
763 				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
764 {
765 	struct amdgpu_device *adev = psp->adev;
766 	uint32_t size = 0;
767 	uint64_t tmr_pa = 0;
768 
769 	if (tmr_bo) {
770 		size = amdgpu_bo_size(tmr_bo);
771 		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
772 	}
773 
774 	if (amdgpu_sriov_vf(psp->adev))
775 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
776 	else
777 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
778 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
779 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
780 	cmd->cmd.cmd_setup_tmr.buf_size = size;
781 	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
782 	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
783 	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
784 }
785 
786 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
787 				      uint64_t pri_buf_mc, uint32_t size)
788 {
789 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
790 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
791 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
792 	cmd->cmd.cmd_load_toc.toc_size = size;
793 }
794 
795 /* Issue the LOAD TOC cmd to PSP to parse the TOC and calculate the TMR size needed */
796 static int psp_load_toc(struct psp_context *psp,
797 			uint32_t *tmr_size)
798 {
799 	int ret;
800 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
801 
802 	/* Copy toc to psp firmware private buffer */
803 	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
804 
805 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
806 
807 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
808 				 psp->fence_buf_mc_addr);
809 	if (!ret)
810 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
811 
812 	release_psp_cmd_buf(psp);
813 
814 	return ret;
815 }
816 
817 /* Set up Trusted Memory Region */
818 static int psp_tmr_init(struct psp_context *psp)
819 {
820 	int ret = 0;
821 	int tmr_size;
822 	void *tmr_buf;
823 	void **pptr;
824 
825 	/*
826 	 * According to the HW engineers, the TMR address should be "naturally
827 	 * aligned", i.e. the start address should be an integer multiple of the
828 	 * TMR size.
829 	 *
830 	 * Note: this memory needs to stay reserved until the driver is unloaded.
831 	 */
832 	tmr_size = PSP_TMR_SIZE(psp->adev);
833 
834 	/* For ASICs that support RLC autoload, the PSP will parse the TOC
835 	 * and calculate the total TMR size needed
836 	 */
837 	if (!amdgpu_sriov_vf(psp->adev) &&
838 	    psp->toc.start_addr &&
839 	    psp->toc.size_bytes &&
840 	    psp->fw_pri_buf) {
841 		ret = psp_load_toc(psp, &tmr_size);
842 		if (ret) {
843 			dev_err(psp->adev->dev, "Failed to load toc\n");
844 			return ret;
845 		}
846 	}
847 
848 	if (!psp->tmr_bo && !psp->boot_time_tmr) {
849 		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
850 		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
851 					      PSP_TMR_ALIGNMENT,
852 					      AMDGPU_HAS_VRAM(psp->adev) ?
853 					      AMDGPU_GEM_DOMAIN_VRAM :
854 					      AMDGPU_GEM_DOMAIN_GTT,
855 					      &psp->tmr_bo, &psp->tmr_mc_addr,
856 					      pptr);
857 	}
858 
859 	return ret;
860 }
861 
862 static bool psp_skip_tmr(struct psp_context *psp)
863 {
864 	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
865 	case IP_VERSION(11, 0, 9):
866 	case IP_VERSION(11, 0, 7):
867 	case IP_VERSION(13, 0, 2):
868 	case IP_VERSION(13, 0, 6):
869 	case IP_VERSION(13, 0, 10):
870 	case IP_VERSION(13, 0, 14):
871 		return true;
872 	default:
873 		return false;
874 	}
875 }
876 
877 static int psp_tmr_load(struct psp_context *psp)
878 {
879 	int ret;
880 	struct psp_gfx_cmd_resp *cmd;
881 
882 	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
883 	 * Already set up by host driver.
884 	 */
885 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
886 		return 0;
887 
888 	cmd = acquire_psp_cmd_buf(psp);
889 
890 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
891 	if (psp->tmr_bo)
892 		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
893 			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
894 
895 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
896 				 psp->fence_buf_mc_addr);
897 
898 	release_psp_cmd_buf(psp);
899 
900 	return ret;
901 }
902 
903 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
904 					struct psp_gfx_cmd_resp *cmd)
905 {
906 	if (amdgpu_sriov_vf(psp->adev))
907 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
908 	else
909 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
910 }
911 
912 static int psp_tmr_unload(struct psp_context *psp)
913 {
914 	int ret;
915 	struct psp_gfx_cmd_resp *cmd;
916 
917 	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
918 	 * as TMR is not loaded at all
919 	 */
920 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
921 		return 0;
922 
923 	cmd = acquire_psp_cmd_buf(psp);
924 
925 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
926 	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
927 
928 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
929 				 psp->fence_buf_mc_addr);
930 
931 	release_psp_cmd_buf(psp);
932 
933 	return ret;
934 }
935 
936 static int psp_tmr_terminate(struct psp_context *psp)
937 {
938 	return psp_tmr_unload(psp);
939 }
940 
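/* Query the PSP for the address of the firmware attestation (FWAR) records database */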
941 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
942 					uint64_t *output_ptr)
943 {
944 	int ret;
945 	struct psp_gfx_cmd_resp *cmd;
946 
947 	if (!output_ptr)
948 		return -EINVAL;
949 
950 	if (amdgpu_sriov_vf(psp->adev))
951 		return 0;
952 
953 	cmd = acquire_psp_cmd_buf(psp);
954 
955 	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
956 
957 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
958 				 psp->fence_buf_mc_addr);
959 
960 	if (!ret) {
961 		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
962 			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
963 	}
964 
965 	release_psp_cmd_buf(psp);
966 
967 	return ret;
968 }
969 
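/* Read the PSP boot config and report GECC enablement (1 = enabled, 0 = disabled) */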
970 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
971 {
972 	struct psp_context *psp = &adev->psp;
973 	struct psp_gfx_cmd_resp *cmd;
974 	int ret;
975 
976 	if (amdgpu_sriov_vf(adev))
977 		return 0;
978 
979 	cmd = acquire_psp_cmd_buf(psp);
980 
981 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
982 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
983 
984 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
985 	if (!ret) {
986 		*boot_cfg =
987 			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
988 	}
989 
990 	release_psp_cmd_buf(psp);
991 
992 	return ret;
993 }
994 
995 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
996 {
997 	int ret;
998 	struct psp_context *psp = &adev->psp;
999 	struct psp_gfx_cmd_resp *cmd;
1000 
1001 	if (amdgpu_sriov_vf(adev))
1002 		return 0;
1003 
1004 	cmd = acquire_psp_cmd_buf(psp);
1005 
1006 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1007 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1008 	cmd->cmd.boot_cfg.boot_config = boot_cfg;
1009 	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1010 
1011 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1012 
1013 	release_psp_cmd_buf(psp);
1014 
1015 	return ret;
1016 }
1017 
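/* Load the register list (RL) firmware through the PSP, if a valid image is present */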
1018 static int psp_rl_load(struct amdgpu_device *adev)
1019 {
1020 	int ret;
1021 	struct psp_context *psp = &adev->psp;
1022 	struct psp_gfx_cmd_resp *cmd;
1023 
1024 	if (!is_psp_fw_valid(psp->rl))
1025 		return 0;
1026 
1027 	cmd = acquire_psp_cmd_buf(psp);
1028 
1029 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1030 	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1031 
1032 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1033 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1034 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1035 	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1036 	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1037 
1038 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1039 
1040 	release_psp_cmd_buf(psp);
1041 
1042 	return ret;
1043 }
1044 
1045 int psp_spatial_partition(struct psp_context *psp, int mode)
1046 {
1047 	struct psp_gfx_cmd_resp *cmd;
1048 	int ret;
1049 
1050 	if (amdgpu_sriov_vf(psp->adev))
1051 		return 0;
1052 
1053 	cmd = acquire_psp_cmd_buf(psp);
1054 
1055 	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1056 	cmd->cmd.cmd_spatial_part.mode = mode;
1057 
1058 	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1059 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1060 
1061 	release_psp_cmd_buf(psp);
1062 
1063 	return ret;
1064 }
1065 
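/* Load the ASD firmware and mark the ASD context initialized on success */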
1066 static int psp_asd_initialize(struct psp_context *psp)
1067 {
1068 	int ret;
1069 
1070 	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
1071 	 * Add a workaround to bypass it for SRIOV for now.
1072 	 * TODO: add a version check to make this common
1073 	 */
1074 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1075 		return 0;
1076 
1077 	/* bypass asd if display hardware is not available */
1078 	if (!amdgpu_device_has_display_hardware(psp->adev) &&
1079 	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1080 		return 0;
1081 
1082 	psp->asd_context.mem_context.shared_mc_addr  = 0;
1083 	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1084 	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1085 
1086 	ret = psp_ta_load(psp, &psp->asd_context);
1087 	if (!ret)
1088 		psp->asd_context.initialized = true;
1089 
1090 	return ret;
1091 }
1092 
1093 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1094 				       uint32_t session_id)
1095 {
1096 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1097 	cmd->cmd.cmd_unload_ta.session_id = session_id;
1098 }
1099 
1100 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1101 {
1102 	int ret;
1103 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1104 
1105 	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1106 
1107 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1108 
1109 	context->resp_status = cmd->resp.status;
1110 
1111 	release_psp_cmd_buf(psp);
1112 
1113 	return ret;
1114 }
1115 
1116 static int psp_asd_terminate(struct psp_context *psp)
1117 {
1118 	int ret;
1119 
1120 	if (amdgpu_sriov_vf(psp->adev))
1121 		return 0;
1122 
1123 	if (!psp->asd_context.initialized)
1124 		return 0;
1125 
1126 	ret = psp_ta_unload(psp, &psp->asd_context);
1127 	if (!ret)
1128 		psp->asd_context.initialized = false;
1129 
1130 	return ret;
1131 }
1132 
1133 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1134 		uint32_t id, uint32_t value)
1135 {
1136 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1137 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1138 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1139 }
1140 
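/* Ask the PSP to program the register identified by @reg with @value on the driver's behalf */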
1141 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1142 		uint32_t value)
1143 {
1144 	struct psp_gfx_cmd_resp *cmd;
1145 	int ret = 0;
1146 
1147 	if (reg >= PSP_REG_LAST)
1148 		return -EINVAL;
1149 
1150 	cmd = acquire_psp_cmd_buf(psp);
1151 
1152 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1153 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1154 	if (ret)
1155 		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1156 
1157 	release_psp_cmd_buf(psp);
1158 
1159 	return ret;
1160 }
1161 
1162 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1163 				     uint64_t ta_bin_mc,
1164 				     struct ta_context *context)
1165 {
1166 	cmd->cmd_id				= context->ta_load_type;
1167 	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1168 	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1169 	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1170 
1171 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1172 		lower_32_bits(context->mem_context.shared_mc_addr);
1173 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1174 		upper_32_bits(context->mem_context.shared_mc_addr);
1175 	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1176 }
1177 
1178 int psp_ta_init_shared_buf(struct psp_context *psp,
1179 				  struct ta_mem_context *mem_ctx)
1180 {
1181 	/*
1182 	 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
1183 	 * physical) for the TA <-> host shared memory
1184 	 */
1185 	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1186 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1187 				      AMDGPU_GEM_DOMAIN_GTT,
1188 				      &mem_ctx->shared_bo,
1189 				      &mem_ctx->shared_mc_addr,
1190 				      &mem_ctx->shared_buf);
1191 }
1192 
1193 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1194 				       uint32_t ta_cmd_id,
1195 				       uint32_t session_id)
1196 {
1197 	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1198 	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1199 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1200 }
1201 
1202 int psp_ta_invoke(struct psp_context *psp,
1203 		  uint32_t ta_cmd_id,
1204 		  struct ta_context *context)
1205 {
1206 	int ret;
1207 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1208 
1209 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1210 
1211 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1212 				 psp->fence_buf_mc_addr);
1213 
1214 	context->resp_status = cmd->resp.status;
1215 
1216 	release_psp_cmd_buf(psp);
1217 
1218 	return ret;
1219 }
1220 
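/*
 * Copy a TA binary into the PSP private firmware buffer and issue the load
 * command; on success the session id returned by the PSP is stored in
 * @context.
 */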
1221 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1222 {
1223 	int ret;
1224 	struct psp_gfx_cmd_resp *cmd;
1225 
1226 	cmd = acquire_psp_cmd_buf(psp);
1227 
1228 	psp_copy_fw(psp, context->bin_desc.start_addr,
1229 		    context->bin_desc.size_bytes);
1230 
1231 	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1232 
1233 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1234 				 psp->fence_buf_mc_addr);
1235 
1236 	context->resp_status = cmd->resp.status;
1237 
1238 	if (!ret)
1239 		context->session_id = cmd->resp.session_id;
1240 
1241 	release_psp_cmd_buf(psp);
1242 
1243 	return ret;
1244 }
1245 
1246 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1247 {
1248 	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1249 }
1250 
1251 int psp_xgmi_terminate(struct psp_context *psp)
1252 {
1253 	int ret;
1254 	struct amdgpu_device *adev = psp->adev;
1255 
1256 	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1257 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1258 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1259 	     adev->gmc.xgmi.connected_to_cpu))
1260 		return 0;
1261 
1262 	if (!psp->xgmi_context.context.initialized)
1263 		return 0;
1264 
1265 	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1266 
1267 	psp->xgmi_context.context.initialized = false;
1268 
1269 	return ret;
1270 }
1271 
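/*
 * Load the XGMI TA (unless @load_ta is false), then invoke it to initialize an
 * XGMI session and record the TA capability flags.
 */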
1272 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1273 {
1274 	struct ta_xgmi_shared_memory *xgmi_cmd;
1275 	int ret;
1276 
1277 	if (!psp->ta_fw ||
1278 	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1279 	    !psp->xgmi_context.context.bin_desc.start_addr)
1280 		return -ENOENT;
1281 
1282 	if (!load_ta)
1283 		goto invoke;
1284 
1285 	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1286 	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1287 
1288 	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1289 		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1290 		if (ret)
1291 			return ret;
1292 	}
1293 
1294 	/* Load XGMI TA */
1295 	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1296 	if (!ret)
1297 		psp->xgmi_context.context.initialized = true;
1298 	else
1299 		return ret;
1300 
1301 invoke:
1302 	/* Initialize XGMI session */
1303 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1304 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1305 	xgmi_cmd->flag_extend_link_record = set_extended_data;
1306 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1307 
1308 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1309 	/* note down the capability flag for the XGMI TA */
1310 	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1311 
1312 	return ret;
1313 }
1314 
1315 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1316 {
1317 	struct ta_xgmi_shared_memory *xgmi_cmd;
1318 	int ret;
1319 
1320 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1321 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1322 
1323 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1324 
1325 	/* Invoke xgmi ta to get hive id */
1326 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1327 	if (ret)
1328 		return ret;
1329 
1330 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1331 
1332 	return 0;
1333 }
1334 
1335 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1336 {
1337 	struct ta_xgmi_shared_memory *xgmi_cmd;
1338 	int ret;
1339 
1340 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1341 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1342 
1343 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1344 
1345 	/* Invoke xgmi ta to get the node id */
1346 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1347 	if (ret)
1348 		return ret;
1349 
1350 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1351 
1352 	return 0;
1353 }
1354 
1355 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1356 {
1357 	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1358 			IP_VERSION(13, 0, 2) &&
1359 		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1360 	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1361 		       IP_VERSION(13, 0, 6);
1362 }
1363 
1364 /*
1365  * Chips that support extended topology information require the driver to
1366  * reflect topology information in the opposite direction.  This is
1367  * because the TA has already exceeded its link record limit and if the
1368  * TA holds bi-directional information, the driver would have to do
1369  * multiple fetches instead of just two.
1370  */
1371 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1372 					struct psp_xgmi_node_info node_info)
1373 {
1374 	struct amdgpu_device *mirror_adev;
1375 	struct amdgpu_hive_info *hive;
1376 	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1377 	uint64_t dst_node_id = node_info.node_id;
1378 	uint8_t dst_num_hops = node_info.num_hops;
1379 	uint8_t dst_num_links = node_info.num_links;
1380 
1381 	hive = amdgpu_get_xgmi_hive(psp->adev);
1382 	if (WARN_ON(!hive))
1383 		return;
1384 
1385 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1386 		struct psp_xgmi_topology_info *mirror_top_info;
1387 		int j;
1388 
1389 		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1390 			continue;
1391 
1392 		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1393 		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1394 			if (mirror_top_info->nodes[j].node_id != src_node_id)
1395 				continue;
1396 
1397 			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1398 			/*
1399 			 * Prevent re-reflection of a 0 num_links value, since the
1400 			 * reflection criterion is based on num_hops (direct or indirect).
1401 			 */
1403 			if (dst_num_links)
1404 				mirror_top_info->nodes[j].num_links = dst_num_links;
1405 
1406 			break;
1407 		}
1408 
1409 		break;
1410 	}
1411 
1412 	amdgpu_put_xgmi_hive(hive);
1413 }
1414 
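/*
 * Query hop and link information for up to @number_devices peers from the
 * XGMI TA, optionally using the extended link records and reflecting the
 * result back to peer devices.
 */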
1415 int psp_xgmi_get_topology_info(struct psp_context *psp,
1416 			       int number_devices,
1417 			       struct psp_xgmi_topology_info *topology,
1418 			       bool get_extended_data)
1419 {
1420 	struct ta_xgmi_shared_memory *xgmi_cmd;
1421 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1422 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1423 	int i;
1424 	int ret;
1425 
1426 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1427 		return -EINVAL;
1428 
1429 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1430 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1431 	xgmi_cmd->flag_extend_link_record = get_extended_data;
1432 
1433 	/* Fill in the shared memory with topology information as input */
1434 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1435 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1436 	topology_info_input->num_nodes = number_devices;
1437 
1438 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1439 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1440 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1441 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1442 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1443 	}
1444 
1445 	/* Invoke xgmi ta to get the topology information */
1446 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1447 	if (ret)
1448 		return ret;
1449 
1450 	/* Read the output topology information from the shared memory */
1451 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1452 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1453 	for (i = 0; i < topology->num_nodes; i++) {
1454 		/* extended data will either be 0 or equal to non-extended data */
1455 		if (topology_info_output->nodes[i].num_hops)
1456 			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1457 
1458 		/* non-extended data gets everything here so no need to update */
1459 		if (!get_extended_data) {
1460 			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1461 			topology->nodes[i].is_sharing_enabled =
1462 					topology_info_output->nodes[i].is_sharing_enabled;
1463 			topology->nodes[i].sdma_engine =
1464 					topology_info_output->nodes[i].sdma_engine;
1465 		}
1466 
1467 	}
1468 
1469 	/* Invoke xgmi ta again to get the link information */
1470 	if (psp_xgmi_peer_link_info_supported(psp)) {
1471 		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1472 		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1473 		bool requires_reflection =
1474 			(psp->xgmi_context.supports_extended_data &&
1475 			 get_extended_data) ||
1476 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1477 				IP_VERSION(13, 0, 6) ||
1478 			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1479 				IP_VERSION(13, 0, 14);
1480 		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1481 				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1482 
1483 		/* Populate the shared output buffer, rather than the cmd input buffer,
1484 		 * with node_ids as the input for GET_PEER_LINKS command execution.
1485 		 * This is required by the XGMI TA implementation of GET_PEER_LINKS.
1486 		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
1487 		 */
1488 		if (ta_port_num_support) {
1489 			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1490 
1491 			for (i = 0; i < topology->num_nodes; i++)
1492 				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1493 
1494 			link_extend_info_output->num_nodes = topology->num_nodes;
1495 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1496 		} else {
1497 			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1498 
1499 			for (i = 0; i < topology->num_nodes; i++)
1500 				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1501 
1502 			link_info_output->num_nodes = topology->num_nodes;
1503 			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1504 		}
1505 
1506 		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1507 		if (ret)
1508 			return ret;
1509 
1510 		for (i = 0; i < topology->num_nodes; i++) {
1511 			uint8_t node_num_links = ta_port_num_support ?
1512 				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1513 			/* accumulate num_links on extended data */
1514 			if (get_extended_data) {
1515 				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1516 			} else {
1517 				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1518 								topology->nodes[i].num_links : node_num_links;
1519 			}
1520 			/* populate the connected port num info if supported and available */
1521 			if (ta_port_num_support && topology->nodes[i].num_links) {
1522 				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1523 				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1524 			}
1525 
1526 			/* reflect the topology information for bi-directionality */
1527 			if (requires_reflection && topology->nodes[i].num_hops)
1528 				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1529 		}
1530 	}
1531 
1532 	return 0;
1533 }
1534 
1535 int psp_xgmi_set_topology_info(struct psp_context *psp,
1536 			       int number_devices,
1537 			       struct psp_xgmi_topology_info *topology)
1538 {
1539 	struct ta_xgmi_shared_memory *xgmi_cmd;
1540 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1541 	int i;
1542 
1543 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1544 		return -EINVAL;
1545 
1546 	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1547 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1548 
1549 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1550 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1551 	topology_info_input->num_nodes = number_devices;
1552 
1553 	for (i = 0; i < topology_info_input->num_nodes; i++) {
1554 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1555 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1556 		topology_info_input->nodes[i].is_sharing_enabled = 1;
1557 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1558 	}
1559 
1560 	/* Invoke xgmi ta to set topology information */
1561 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1562 }
1563 
1564 // ras begin
1565 static void psp_ras_ta_check_status(struct psp_context *psp)
1566 {
1567 	struct ta_ras_shared_memory *ras_cmd =
1568 		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1569 
1570 	switch (ras_cmd->ras_status) {
1571 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1572 		dev_warn(psp->adev->dev,
1573 			 "RAS WARNING: cmd failed due to unsupported ip\n");
1574 		break;
1575 	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1576 		dev_warn(psp->adev->dev,
1577 			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1578 		break;
1579 	case TA_RAS_STATUS__SUCCESS:
1580 		break;
1581 	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1582 		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1583 			dev_warn(psp->adev->dev,
1584 				 "RAS WARNING: Inject error to critical region is not allowed\n");
1585 		break;
1586 	default:
1587 		dev_warn(psp->adev->dev,
1588 			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1589 		break;
1590 	}
1591 }
1592 
1593 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1594 {
1595 	struct ta_ras_shared_memory *ras_cmd;
1596 	int ret;
1597 
1598 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1599 
1600 	/*
1601 	 * TODO: bypass the loading in sriov for now
1602 	 */
1603 	if (amdgpu_sriov_vf(psp->adev))
1604 		return 0;
1605 
1606 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1607 
1608 	if (amdgpu_ras_intr_triggered())
1609 		return ret;
1610 
1611 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1612 		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1613 		return -EINVAL;
1614 	}
1615 
1616 	if (!ret) {
1617 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1618 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1619 
1620 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1621 		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1622 			dev_warn(psp->adev->dev,
1623 				 "RAS internal register access blocked\n");
1624 
1625 		psp_ras_ta_check_status(psp);
1626 	}
1627 
1628 	return ret;
1629 }
1630 
1631 int psp_ras_enable_features(struct psp_context *psp,
1632 		union ta_ras_cmd_input *info, bool enable)
1633 {
1634 	struct ta_ras_shared_memory *ras_cmd;
1635 	int ret;
1636 
1637 	if (!psp->ras_context.context.initialized)
1638 		return -EINVAL;
1639 
1640 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1641 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1642 
1643 	if (enable)
1644 		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1645 	else
1646 		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1647 
1648 	ras_cmd->ras_in_message = *info;
1649 
1650 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1651 	if (ret)
1652 		return -EINVAL;
1653 
1654 	return 0;
1655 }
1656 
1657 int psp_ras_terminate(struct psp_context *psp)
1658 {
1659 	int ret;
1660 
1661 	/*
1662 	 * TODO: bypass the terminate in sriov for now
1663 	 */
1664 	if (amdgpu_sriov_vf(psp->adev))
1665 		return 0;
1666 
1667 	if (!psp->ras_context.context.initialized)
1668 		return 0;
1669 
1670 	ret = psp_ta_unload(psp, &psp->ras_context.context);
1671 
1672 	psp->ras_context.context.initialized = false;
1673 
1674 	return ret;
1675 }
1676 
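/*
 * Align the GECC boot config with the RAS module parameters, then allocate
 * the shared buffer and load the RAS TA.
 */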
1677 int psp_ras_initialize(struct psp_context *psp)
1678 {
1679 	int ret;
1680 	uint32_t boot_cfg = 0xFF;
1681 	struct amdgpu_device *adev = psp->adev;
1682 	struct ta_ras_shared_memory *ras_cmd;
1683 
1684 	/*
1685 	 * TODO: bypass the initialize in sriov for now
1686 	 */
1687 	if (amdgpu_sriov_vf(adev))
1688 		return 0;
1689 
1690 	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1691 	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1692 		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1693 		return 0;
1694 	}
1695 
1696 	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1697 		/* query GECC enablement status from boot config
1698 		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1699 		 */
1700 		ret = psp_boot_config_get(adev, &boot_cfg);
1701 		if (ret)
1702 			dev_warn(adev->dev, "PSP get boot config failed\n");
1703 
1704 		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1705 			if (!boot_cfg) {
1706 				dev_info(adev->dev, "GECC is disabled\n");
1707 			} else {
1708 				/* disable GECC in next boot cycle if ras is
1709 				 * disabled by module parameter amdgpu_ras_enable
1710 				 * and/or amdgpu_ras_mask, or the boot_config_get call
1711 				 * failed
1712 				 */
1713 				ret = psp_boot_config_set(adev, 0);
1714 				if (ret)
1715 					dev_warn(adev->dev, "PSP set boot config failed\n");
1716 				else
1717 					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1718 			}
1719 		} else {
1720 			if (boot_cfg == 1) {
1721 				dev_info(adev->dev, "GECC is enabled\n");
1722 			} else {
1723 				/* enable GECC in next boot cycle if it is disabled
1724 				 * in the boot config, or force-enable GECC if we failed to
1725 				 * get the boot configuration
1726 				 */
1727 				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1728 				if (ret)
1729 					dev_warn(adev->dev, "PSP set boot config failed\n");
1730 				else
1731 					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1732 			}
1733 		}
1734 	}
1735 
1736 	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1737 	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1738 
1739 	if (!psp->ras_context.context.mem_context.shared_buf) {
1740 		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1741 		if (ret)
1742 			return ret;
1743 	}
1744 
1745 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1746 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1747 
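	/* fill in the RAS TA init flags (poison mode, dGPU mode, XCC mask
	 * and disabled channel count) in shared memory before loading the TA
	 */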
1748 	if (amdgpu_ras_is_poison_mode_supported(adev))
1749 		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1750 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1751 		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1752 	ras_cmd->ras_in_message.init_flags.xcc_mask =
1753 		adev->gfx.xcc_mask;
1754 	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1755 
1756 	ret = psp_ta_load(psp, &psp->ras_context.context);
1757 
1758 	if (!ret && !ras_cmd->ras_status) {
1759 		psp->ras_context.context.initialized = true;
1760 	} else {
1761 		if (ras_cmd->ras_status)
1762 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1763 
1764 		/* failed to load the RAS TA */
1765 		psp->ras_context.context.initialized = false;
1766 	}
1767 
1768 	return ret;
1769 }
1770 
1771 int psp_ras_trigger_error(struct psp_context *psp,
1772 			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1773 {
1774 	struct ta_ras_shared_memory *ras_cmd;
1775 	struct amdgpu_device *adev = psp->adev;
1776 	int ret;
1777 	uint32_t dev_mask;
1778 
1779 	if (!psp->ras_context.context.initialized)
1780 		return -EINVAL;
1781 
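	/* translate the caller's logical instance mask into the per-IP
	 * device mask expected by the RAS TA for this block
	 */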
1782 	switch (info->block_id) {
1783 	case TA_RAS_BLOCK__GFX:
1784 		dev_mask = GET_MASK(GC, instance_mask);
1785 		break;
1786 	case TA_RAS_BLOCK__SDMA:
1787 		dev_mask = GET_MASK(SDMA0, instance_mask);
1788 		break;
1789 	case TA_RAS_BLOCK__VCN:
1790 	case TA_RAS_BLOCK__JPEG:
1791 		dev_mask = GET_MASK(VCN, instance_mask);
1792 		break;
1793 	default:
1794 		dev_mask = instance_mask;
1795 		break;
1796 	}
1797 
1798 	/* reuse sub_block_index for backward compatibility */
1799 	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1800 	dev_mask &= AMDGPU_RAS_INST_MASK;
1801 	info->sub_block_index |= dev_mask;
1802 
1803 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1804 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1805 
1806 	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1807 	ras_cmd->ras_in_message.trigger_error = *info;
1808 
1809 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1810 	if (ret)
1811 		return -EINVAL;
1812 
1813 	/* If err_event_athub occurs, the error injection was successful;
1814 	 * however, the return status from the TA is no longer reliable
1815 	 */
1816 	if (amdgpu_ras_intr_triggered())
1817 		return 0;
1818 
1819 	if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1820 		return -EACCES;
1821 	else if (ras_cmd->ras_status)
1822 		return -EINVAL;
1823 
1824 	return 0;
1825 }
1826 
1827 int psp_ras_query_address(struct psp_context *psp,
1828 			  struct ta_ras_query_address_input *addr_in,
1829 			  struct ta_ras_query_address_output *addr_out)
1830 {
1831 	struct ta_ras_shared_memory *ras_cmd;
1832 	int ret;
1833 
1834 	if (!psp->ras_context.context.initialized)
1835 		return -EINVAL;
1836 
1837 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1838 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1839 
1840 	ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
1841 	ras_cmd->ras_in_message.address = *addr_in;
1842 
1843 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1844 	if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1845 		return -EINVAL;
1846 
1847 	*addr_out = ras_cmd->ras_out_message.address;
1848 
1849 	return 0;
1850 }
1851 // ras end
1852 
1853 // HDCP start
1854 static int psp_hdcp_initialize(struct psp_context *psp)
1855 {
1856 	int ret;
1857 
1858 	/*
1859 	 * TODO: bypass the initialize in sriov for now
1860 	 */
1861 	if (amdgpu_sriov_vf(psp->adev))
1862 		return 0;
1863 
1864 	/* bypass hdcp initialization if dmu is harvested */
1865 	if (!amdgpu_device_has_display_hardware(psp->adev))
1866 		return 0;
1867 
1868 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1869 	    !psp->hdcp_context.context.bin_desc.start_addr) {
1870 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1871 		return 0;
1872 	}
1873 
1874 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1875 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1876 
1877 	if (!psp->hdcp_context.context.mem_context.shared_buf) {
1878 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1879 		if (ret)
1880 			return ret;
1881 	}
1882 
1883 	ret = psp_ta_load(psp, &psp->hdcp_context.context);
1884 	if (!ret) {
1885 		psp->hdcp_context.context.initialized = true;
1886 		mutex_init(&psp->hdcp_context.mutex);
1887 	}
1888 
1889 	return ret;
1890 }
1891 
1892 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1893 {
1894 	/*
1895 	 * TODO: bypass the loading in sriov for now
1896 	 */
1897 	if (amdgpu_sriov_vf(psp->adev))
1898 		return 0;
1899 
1900 	if (!psp->hdcp_context.context.initialized)
1901 		return 0;
1902 
1903 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1904 }
1905 
1906 static int psp_hdcp_terminate(struct psp_context *psp)
1907 {
1908 	int ret;
1909 
1910 	/*
1911 	 * TODO: bypass the terminate in sriov for now
1912 	 */
1913 	if (amdgpu_sriov_vf(psp->adev))
1914 		return 0;
1915 
1916 	if (!psp->hdcp_context.context.initialized)
1917 		return 0;
1918 
1919 	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1920 
1921 	psp->hdcp_context.context.initialized = false;
1922 
1923 	return ret;
1924 }
1925 // HDCP end
1926 
1927 // DTM start
1928 static int psp_dtm_initialize(struct psp_context *psp)
1929 {
1930 	int ret;
1931 
1932 	/*
1933 	 * TODO: bypass the initialize in sriov for now
1934 	 */
1935 	if (amdgpu_sriov_vf(psp->adev))
1936 		return 0;
1937 
1938 	/* bypass dtm initialization if dmu is harvested */
1939 	if (!amdgpu_device_has_display_hardware(psp->adev))
1940 		return 0;
1941 
1942 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
1943 	    !psp->dtm_context.context.bin_desc.start_addr) {
1944 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1945 		return 0;
1946 	}
1947 
1948 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1949 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1950 
1951 	if (!psp->dtm_context.context.mem_context.shared_buf) {
1952 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1953 		if (ret)
1954 			return ret;
1955 	}
1956 
1957 	ret = psp_ta_load(psp, &psp->dtm_context.context);
1958 	if (!ret) {
1959 		psp->dtm_context.context.initialized = true;
1960 		mutex_init(&psp->dtm_context.mutex);
1961 	}
1962 
1963 	return ret;
1964 }
1965 
1966 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1967 {
1968 	/*
1969 	 * TODO: bypass the loading in sriov for now
1970 	 */
1971 	if (amdgpu_sriov_vf(psp->adev))
1972 		return 0;
1973 
1974 	if (!psp->dtm_context.context.initialized)
1975 		return 0;
1976 
1977 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
1978 }
1979 
1980 static int psp_dtm_terminate(struct psp_context *psp)
1981 {
1982 	int ret;
1983 
1984 	/*
1985 	 * TODO: bypass the terminate in sriov for now
1986 	 */
1987 	if (amdgpu_sriov_vf(psp->adev))
1988 		return 0;
1989 
1990 	if (!psp->dtm_context.context.initialized)
1991 		return 0;
1992 
1993 	ret = psp_ta_unload(psp, &psp->dtm_context.context);
1994 
1995 	psp->dtm_context.context.initialized = false;
1996 
1997 	return ret;
1998 }
1999 // DTM end
2000 
2001 // RAP start
2002 static int psp_rap_initialize(struct psp_context *psp)
2003 {
2004 	int ret;
2005 	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2006 
2007 	/*
2008 	 * TODO: bypass the initialize in sriov for now
2009 	 */
2010 	if (amdgpu_sriov_vf(psp->adev))
2011 		return 0;
2012 
2013 	if (!psp->rap_context.context.bin_desc.size_bytes ||
2014 	    !psp->rap_context.context.bin_desc.start_addr) {
2015 		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2016 		return 0;
2017 	}
2018 
2019 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2020 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2021 
2022 	if (!psp->rap_context.context.mem_context.shared_buf) {
2023 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2024 		if (ret)
2025 			return ret;
2026 	}
2027 
2028 	ret = psp_ta_load(psp, &psp->rap_context.context);
2029 	if (!ret) {
2030 		psp->rap_context.context.initialized = true;
2031 		mutex_init(&psp->rap_context.mutex);
2032 	} else
2033 		return ret;
2034 
2035 	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2036 	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2037 		psp_rap_terminate(psp);
2038 		/* free rap shared memory */
2039 		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2040 
2041 		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2042 			 ret, status);
2043 
2044 		return ret;
2045 	}
2046 
2047 	return 0;
2048 }
2049 
2050 static int psp_rap_terminate(struct psp_context *psp)
2051 {
2052 	int ret;
2053 
2054 	if (!psp->rap_context.context.initialized)
2055 		return 0;
2056 
2057 	ret = psp_ta_unload(psp, &psp->rap_context.context);
2058 
2059 	psp->rap_context.context.initialized = false;
2060 
2061 	return ret;
2062 }
2063 
2064 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2065 {
2066 	struct ta_rap_shared_memory *rap_cmd;
2067 	int ret = 0;
2068 
2069 	if (!psp->rap_context.context.initialized)
2070 		return 0;
2071 
2072 	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2073 	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2074 		return -EINVAL;
2075 
2076 	mutex_lock(&psp->rap_context.mutex);
2077 
2078 	rap_cmd = (struct ta_rap_shared_memory *)
2079 		  psp->rap_context.context.mem_context.shared_buf;
2080 	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2081 
2082 	rap_cmd->cmd_id = ta_cmd_id;
2083 	rap_cmd->validation_method_id = METHOD_A;
2084 
2085 	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2086 	if (ret)
2087 		goto out_unlock;
2088 
2089 	if (status)
2090 		*status = rap_cmd->rap_status;
2091 
2092 out_unlock:
2093 	mutex_unlock(&psp->rap_context.mutex);
2094 
2095 	return ret;
2096 }
2097 // RAP end
2098 
2099 /* securedisplay start */
2100 static int psp_securedisplay_initialize(struct psp_context *psp)
2101 {
2102 	int ret;
2103 	struct ta_securedisplay_cmd *securedisplay_cmd;
2104 
2105 	/*
2106 	 * TODO: bypass the initialize in sriov for now
2107 	 */
2108 	if (amdgpu_sriov_vf(psp->adev))
2109 		return 0;
2110 
2111 	/* bypass securedisplay initialization if dmu is harvested */
2112 	if (!amdgpu_device_has_display_hardware(psp->adev))
2113 		return 0;
2114 
2115 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2116 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2117 		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2118 		return 0;
2119 	}
2120 
2121 	psp->securedisplay_context.context.mem_context.shared_mem_size =
2122 		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2123 	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2124 
2125 	if (!psp->securedisplay_context.context.initialized) {
2126 		ret = psp_ta_init_shared_buf(psp,
2127 					     &psp->securedisplay_context.context.mem_context);
2128 		if (ret)
2129 			return ret;
2130 	}
2131 
2132 	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2133 	if (!ret) {
2134 		psp->securedisplay_context.context.initialized = true;
2135 		mutex_init(&psp->securedisplay_context.mutex);
2136 	} else
2137 		return ret;
2138 
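	/* sanity-check the freshly loaded TA with a QUERY_TA command before
	 * declaring secure display usable
	 */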
2139 	mutex_lock(&psp->securedisplay_context.mutex);
2140 
2141 	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2142 			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2143 
2144 	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2145 
2146 	mutex_unlock(&psp->securedisplay_context.mutex);
2147 
2148 	if (ret) {
2149 		psp_securedisplay_terminate(psp);
2150 		/* free securedisplay shared memory */
2151 		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2152 		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2153 		return -EINVAL;
2154 	}
2155 
2156 	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2157 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2158 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2159 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2160 		/* don't try again */
2161 		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2162 	}
2163 
2164 	return 0;
2165 }
2166 
2167 static int psp_securedisplay_terminate(struct psp_context *psp)
2168 {
2169 	int ret;
2170 
2171 	/*
2172 	 * TODO: bypass the terminate in sriov for now
2173 	 */
2174 	if (amdgpu_sriov_vf(psp->adev))
2175 		return 0;
2176 
2177 	if (!psp->securedisplay_context.context.initialized)
2178 		return 0;
2179 
2180 	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2181 
2182 	psp->securedisplay_context.context.initialized = false;
2183 
2184 	return ret;
2185 }
2186 
2187 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2188 {
2189 	int ret;
2190 
2191 	if (!psp->securedisplay_context.context.initialized)
2192 		return -EINVAL;
2193 
2194 	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2195 	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2196 		return -EINVAL;
2197 
2198 	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2199 
2200 	return ret;
2201 }
2202 /* SECUREDISPLAY end */
2203 
2204 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2205 {
2206 	struct psp_context *psp = &adev->psp;
2207 	int ret = 0;
2208 
2209 	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2210 		ret = psp->funcs->wait_for_bootloader(psp);
2211 
2212 	return ret;
2213 }
2214 
2215 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2216 {
2217 	if (psp->funcs &&
2218 	    psp->funcs->get_ras_capability) {
2219 		return psp->funcs->get_ras_capability(psp);
2220 	} else {
2221 		return false;
2222 	}
2223 }
2224 
2225 static int psp_hw_start(struct psp_context *psp)
2226 {
2227 	struct amdgpu_device *adev = psp->adev;
2228 	int ret;
2229 
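	/* On bare metal, stage the PSP bootloader components (KDB, SPL,
	 * SYS_DRV and the SOC/INTF/DBG/RAS/IPKEYMGR drivers) before loading
	 * the secure OS; each step is skipped when the corresponding image
	 * is absent or the PSP version does not implement the hook.
	 */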
2230 	if (!amdgpu_sriov_vf(adev)) {
2231 		if ((is_psp_fw_valid(psp->kdb)) &&
2232 		    (psp->funcs->bootloader_load_kdb != NULL)) {
2233 			ret = psp_bootloader_load_kdb(psp);
2234 			if (ret) {
2235 				dev_err(adev->dev, "PSP load kdb failed!\n");
2236 				return ret;
2237 			}
2238 		}
2239 
2240 		if ((is_psp_fw_valid(psp->spl)) &&
2241 		    (psp->funcs->bootloader_load_spl != NULL)) {
2242 			ret = psp_bootloader_load_spl(psp);
2243 			if (ret) {
2244 				dev_err(adev->dev, "PSP load spl failed!\n");
2245 				return ret;
2246 			}
2247 		}
2248 
2249 		if ((is_psp_fw_valid(psp->sys)) &&
2250 		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2251 			ret = psp_bootloader_load_sysdrv(psp);
2252 			if (ret) {
2253 				dev_err(adev->dev, "PSP load sys drv failed!\n");
2254 				return ret;
2255 			}
2256 		}
2257 
2258 		if ((is_psp_fw_valid(psp->soc_drv)) &&
2259 		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2260 			ret = psp_bootloader_load_soc_drv(psp);
2261 			if (ret) {
2262 				dev_err(adev->dev, "PSP load soc drv failed!\n");
2263 				return ret;
2264 			}
2265 		}
2266 
2267 		if ((is_psp_fw_valid(psp->intf_drv)) &&
2268 		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2269 			ret = psp_bootloader_load_intf_drv(psp);
2270 			if (ret) {
2271 				dev_err(adev->dev, "PSP load intf drv failed!\n");
2272 				return ret;
2273 			}
2274 		}
2275 
2276 		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2277 		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2278 			ret = psp_bootloader_load_dbg_drv(psp);
2279 			if (ret) {
2280 				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2281 				return ret;
2282 			}
2283 		}
2284 
2285 		if ((is_psp_fw_valid(psp->ras_drv)) &&
2286 		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2287 			ret = psp_bootloader_load_ras_drv(psp);
2288 			if (ret) {
2289 				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2290 				return ret;
2291 			}
2292 		}
2293 
2294 		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2295 		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2296 			ret = psp_bootloader_load_ipkeymgr_drv(psp);
2297 			if (ret) {
2298 				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2299 				return ret;
2300 			}
2301 		}
2302 
2303 		if ((is_psp_fw_valid(psp->sos)) &&
2304 		    (psp->funcs->bootloader_load_sos != NULL)) {
2305 			ret = psp_bootloader_load_sos(psp);
2306 			if (ret) {
2307 				dev_err(adev->dev, "PSP load sos failed!\n");
2308 				return ret;
2309 			}
2310 		}
2311 	}
2312 
2313 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2314 	if (ret) {
2315 		dev_err(adev->dev, "PSP create ring failed!\n");
2316 		return ret;
2317 	}
2318 
2319 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2320 		goto skip_pin_bo;
2321 
2322 	if (!psp->boot_time_tmr || psp->autoload_supported) {
2323 		ret = psp_tmr_init(psp);
2324 		if (ret) {
2325 			dev_err(adev->dev, "PSP tmr init failed!\n");
2326 			return ret;
2327 		}
2328 	}
2329 
2330 skip_pin_bo:
2331 	/*
2332 	 * For ASICs with DF Cstate management centralized
2333 	 * to PMFW, TMR setup should be performed after PMFW
2334 	 * is loaded and before other non-PSP firmware is loaded.
2335 	 */
2336 	if (psp->pmfw_centralized_cstate_management) {
2337 		ret = psp_load_smu_fw(psp);
2338 		if (ret)
2339 			return ret;
2340 	}
2341 
2342 	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2343 		ret = psp_tmr_load(psp);
2344 		if (ret) {
2345 			dev_err(adev->dev, "PSP load tmr failed!\n");
2346 			return ret;
2347 		}
2348 	}
2349 
2350 	return 0;
2351 }
2352 
2353 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2354 			   enum psp_gfx_fw_type *type)
2355 {
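	/* map the driver's ucode ID onto the firmware type understood by the
	 * PSP GFX interface; unknown IDs are rejected
	 */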
2356 	switch (ucode->ucode_id) {
2357 	case AMDGPU_UCODE_ID_CAP:
2358 		*type = GFX_FW_TYPE_CAP;
2359 		break;
2360 	case AMDGPU_UCODE_ID_SDMA0:
2361 		*type = GFX_FW_TYPE_SDMA0;
2362 		break;
2363 	case AMDGPU_UCODE_ID_SDMA1:
2364 		*type = GFX_FW_TYPE_SDMA1;
2365 		break;
2366 	case AMDGPU_UCODE_ID_SDMA2:
2367 		*type = GFX_FW_TYPE_SDMA2;
2368 		break;
2369 	case AMDGPU_UCODE_ID_SDMA3:
2370 		*type = GFX_FW_TYPE_SDMA3;
2371 		break;
2372 	case AMDGPU_UCODE_ID_SDMA4:
2373 		*type = GFX_FW_TYPE_SDMA4;
2374 		break;
2375 	case AMDGPU_UCODE_ID_SDMA5:
2376 		*type = GFX_FW_TYPE_SDMA5;
2377 		break;
2378 	case AMDGPU_UCODE_ID_SDMA6:
2379 		*type = GFX_FW_TYPE_SDMA6;
2380 		break;
2381 	case AMDGPU_UCODE_ID_SDMA7:
2382 		*type = GFX_FW_TYPE_SDMA7;
2383 		break;
2384 	case AMDGPU_UCODE_ID_CP_MES:
2385 		*type = GFX_FW_TYPE_CP_MES;
2386 		break;
2387 	case AMDGPU_UCODE_ID_CP_MES_DATA:
2388 		*type = GFX_FW_TYPE_MES_STACK;
2389 		break;
2390 	case AMDGPU_UCODE_ID_CP_MES1:
2391 		*type = GFX_FW_TYPE_CP_MES_KIQ;
2392 		break;
2393 	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2394 		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2395 		break;
2396 	case AMDGPU_UCODE_ID_CP_CE:
2397 		*type = GFX_FW_TYPE_CP_CE;
2398 		break;
2399 	case AMDGPU_UCODE_ID_CP_PFP:
2400 		*type = GFX_FW_TYPE_CP_PFP;
2401 		break;
2402 	case AMDGPU_UCODE_ID_CP_ME:
2403 		*type = GFX_FW_TYPE_CP_ME;
2404 		break;
2405 	case AMDGPU_UCODE_ID_CP_MEC1:
2406 		*type = GFX_FW_TYPE_CP_MEC;
2407 		break;
2408 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2409 		*type = GFX_FW_TYPE_CP_MEC_ME1;
2410 		break;
2411 	case AMDGPU_UCODE_ID_CP_MEC2:
2412 		*type = GFX_FW_TYPE_CP_MEC;
2413 		break;
2414 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2415 		*type = GFX_FW_TYPE_CP_MEC_ME2;
2416 		break;
2417 	case AMDGPU_UCODE_ID_RLC_P:
2418 		*type = GFX_FW_TYPE_RLC_P;
2419 		break;
2420 	case AMDGPU_UCODE_ID_RLC_V:
2421 		*type = GFX_FW_TYPE_RLC_V;
2422 		break;
2423 	case AMDGPU_UCODE_ID_RLC_G:
2424 		*type = GFX_FW_TYPE_RLC_G;
2425 		break;
2426 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2427 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2428 		break;
2429 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2430 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2431 		break;
2432 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2433 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2434 		break;
2435 	case AMDGPU_UCODE_ID_RLC_IRAM:
2436 		*type = GFX_FW_TYPE_RLC_IRAM;
2437 		break;
2438 	case AMDGPU_UCODE_ID_RLC_DRAM:
2439 		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2440 		break;
2441 	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2442 		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2443 		break;
2444 	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2445 		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2446 		break;
2447 	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2448 		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2449 		break;
2450 	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2451 		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2452 		break;
2453 	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2454 		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2455 		break;
2456 	case AMDGPU_UCODE_ID_SMC:
2457 		*type = GFX_FW_TYPE_SMU;
2458 		break;
2459 	case AMDGPU_UCODE_ID_PPTABLE:
2460 		*type = GFX_FW_TYPE_PPTABLE;
2461 		break;
2462 	case AMDGPU_UCODE_ID_UVD:
2463 		*type = GFX_FW_TYPE_UVD;
2464 		break;
2465 	case AMDGPU_UCODE_ID_UVD1:
2466 		*type = GFX_FW_TYPE_UVD1;
2467 		break;
2468 	case AMDGPU_UCODE_ID_VCE:
2469 		*type = GFX_FW_TYPE_VCE;
2470 		break;
2471 	case AMDGPU_UCODE_ID_VCN:
2472 		*type = GFX_FW_TYPE_VCN;
2473 		break;
2474 	case AMDGPU_UCODE_ID_VCN1:
2475 		*type = GFX_FW_TYPE_VCN1;
2476 		break;
2477 	case AMDGPU_UCODE_ID_DMCU_ERAM:
2478 		*type = GFX_FW_TYPE_DMCU_ERAM;
2479 		break;
2480 	case AMDGPU_UCODE_ID_DMCU_INTV:
2481 		*type = GFX_FW_TYPE_DMCU_ISR;
2482 		break;
2483 	case AMDGPU_UCODE_ID_VCN0_RAM:
2484 		*type = GFX_FW_TYPE_VCN0_RAM;
2485 		break;
2486 	case AMDGPU_UCODE_ID_VCN1_RAM:
2487 		*type = GFX_FW_TYPE_VCN1_RAM;
2488 		break;
2489 	case AMDGPU_UCODE_ID_DMCUB:
2490 		*type = GFX_FW_TYPE_DMUB;
2491 		break;
2492 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2493 	case AMDGPU_UCODE_ID_SDMA_RS64:
2494 		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2495 		break;
2496 	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2497 		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2498 		break;
2499 	case AMDGPU_UCODE_ID_IMU_I:
2500 		*type = GFX_FW_TYPE_IMU_I;
2501 		break;
2502 	case AMDGPU_UCODE_ID_IMU_D:
2503 		*type = GFX_FW_TYPE_IMU_D;
2504 		break;
2505 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2506 		*type = GFX_FW_TYPE_RS64_PFP;
2507 		break;
2508 	case AMDGPU_UCODE_ID_CP_RS64_ME:
2509 		*type = GFX_FW_TYPE_RS64_ME;
2510 		break;
2511 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2512 		*type = GFX_FW_TYPE_RS64_MEC;
2513 		break;
2514 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2515 		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2516 		break;
2517 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2518 		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2519 		break;
2520 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2521 		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2522 		break;
2523 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2524 		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2525 		break;
2526 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2527 		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2528 		break;
2529 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2530 		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2531 		break;
2532 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2533 		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2534 		break;
2535 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2536 		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2537 		break;
2538 	case AMDGPU_UCODE_ID_VPE_CTX:
2539 		*type = GFX_FW_TYPE_VPEC_FW1;
2540 		break;
2541 	case AMDGPU_UCODE_ID_VPE_CTL:
2542 		*type = GFX_FW_TYPE_VPEC_FW2;
2543 		break;
2544 	case AMDGPU_UCODE_ID_VPE:
2545 		*type = GFX_FW_TYPE_VPE;
2546 		break;
2547 	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2548 		*type = GFX_FW_TYPE_UMSCH_UCODE;
2549 		break;
2550 	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2551 		*type = GFX_FW_TYPE_UMSCH_DATA;
2552 		break;
2553 	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2554 		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2555 		break;
2556 	case AMDGPU_UCODE_ID_P2S_TABLE:
2557 		*type = GFX_FW_TYPE_P2S_TABLE;
2558 		break;
2559 	case AMDGPU_UCODE_ID_JPEG_RAM:
2560 		*type = GFX_FW_TYPE_JPEG_RAM;
2561 		break;
2562 	case AMDGPU_UCODE_ID_ISP:
2563 		*type = GFX_FW_TYPE_ISP;
2564 		break;
2565 	case AMDGPU_UCODE_ID_MAXIMUM:
2566 	default:
2567 		return -EINVAL;
2568 	}
2569 
2570 	return 0;
2571 }
2572 
2573 static void psp_print_fw_hdr(struct psp_context *psp,
2574 			     struct amdgpu_firmware_info *ucode)
2575 {
2576 	struct amdgpu_device *adev = psp->adev;
2577 	struct common_firmware_header *hdr;
2578 
2579 	switch (ucode->ucode_id) {
2580 	case AMDGPU_UCODE_ID_SDMA0:
2581 	case AMDGPU_UCODE_ID_SDMA1:
2582 	case AMDGPU_UCODE_ID_SDMA2:
2583 	case AMDGPU_UCODE_ID_SDMA3:
2584 	case AMDGPU_UCODE_ID_SDMA4:
2585 	case AMDGPU_UCODE_ID_SDMA5:
2586 	case AMDGPU_UCODE_ID_SDMA6:
2587 	case AMDGPU_UCODE_ID_SDMA7:
2588 		hdr = (struct common_firmware_header *)
2589 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2590 		amdgpu_ucode_print_sdma_hdr(hdr);
2591 		break;
2592 	case AMDGPU_UCODE_ID_CP_CE:
2593 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2594 		amdgpu_ucode_print_gfx_hdr(hdr);
2595 		break;
2596 	case AMDGPU_UCODE_ID_CP_PFP:
2597 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2598 		amdgpu_ucode_print_gfx_hdr(hdr);
2599 		break;
2600 	case AMDGPU_UCODE_ID_CP_ME:
2601 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2602 		amdgpu_ucode_print_gfx_hdr(hdr);
2603 		break;
2604 	case AMDGPU_UCODE_ID_CP_MEC1:
2605 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2606 		amdgpu_ucode_print_gfx_hdr(hdr);
2607 		break;
2608 	case AMDGPU_UCODE_ID_RLC_G:
2609 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2610 		amdgpu_ucode_print_rlc_hdr(hdr);
2611 		break;
2612 	case AMDGPU_UCODE_ID_SMC:
2613 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2614 		amdgpu_ucode_print_smc_hdr(hdr);
2615 		break;
2616 	default:
2617 		break;
2618 	}
2619 }
2620 
2621 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2622 				       struct amdgpu_firmware_info *ucode,
2623 				       struct psp_gfx_cmd_resp *cmd)
2624 {
2625 	int ret;
2626 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2627 
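	/* build a LOAD_IP_FW command that points the PSP at the firmware's
	 * GPU address and size
	 */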
2628 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2629 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2630 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2631 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2632 
2633 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2634 	if (ret)
2635 		dev_err(psp->adev->dev, "Unknown firmware type\n");
2636 
2637 	return ret;
2638 }
2639 
2640 int psp_execute_ip_fw_load(struct psp_context *psp,
2641 			   struct amdgpu_firmware_info *ucode)
2642 {
2643 	int ret = 0;
2644 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2645 
2646 	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2647 	if (!ret) {
2648 		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2649 					 psp->fence_buf_mc_addr);
2650 	}
2651 
2652 	release_psp_cmd_buf(psp);
2653 
2654 	return ret;
2655 }
2656 
2657 static int psp_load_p2s_table(struct psp_context *psp)
2658 {
2659 	int ret;
2660 	struct amdgpu_device *adev = psp->adev;
2661 	struct amdgpu_firmware_info *ucode =
2662 		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2663 
2664 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2665 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2666 		return 0;
2667 
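	/* P2S table loading requires a minimum SOS version on MP0 13.0.6 and
	 * 13.0.14; silently skip it on older firmware
	 */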
2668 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2669 	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2670 		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2671 								0x0036003C;
2672 		if (psp->sos.fw_version < supp_vers)
2673 			return 0;
2674 	}
2675 
2676 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2677 		return 0;
2678 
2679 	ret = psp_execute_ip_fw_load(psp, ucode);
2680 
2681 	return ret;
2682 }
2683 
2684 static int psp_load_smu_fw(struct psp_context *psp)
2685 {
2686 	int ret;
2687 	struct amdgpu_device *adev = psp->adev;
2688 	struct amdgpu_firmware_info *ucode =
2689 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2690 	struct amdgpu_ras *ras = psp->ras_context.ras;
2691 
2692 	/*
2693 	 * Skip SMU FW reloading when BACO/BAMACO is used for runtime PM,
2694 	 * as the SMU stays alive in that case.
2695 	 */
2696 	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2697 				(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2698 		return 0;
2699 
2700 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2701 		return 0;
2702 
2703 	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2704 	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2705 	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2706 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2707 		if (ret)
2708 			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2709 	}
2710 
2711 	ret = psp_execute_ip_fw_load(psp, ucode);
2712 
2713 	if (ret)
2714 		dev_err(adev->dev, "PSP load smu failed!\n");
2715 
2716 	return ret;
2717 }
2718 
2719 static bool fw_load_skip_check(struct psp_context *psp,
2720 			       struct amdgpu_firmware_info *ucode)
2721 {
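	/* return true when this ucode should not be submitted to the PSP
	 * here: missing image, firmware loaded elsewhere (SMC, P2S table),
	 * SR-IOV exclusions, or MEC JT images covered by RLC autoload
	 */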
2722 	if (!ucode->fw || !ucode->ucode_size)
2723 		return true;
2724 
2725 	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2726 		return true;
2727 
2728 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2729 	    (psp_smu_reload_quirk(psp) ||
2730 	     psp->autoload_supported ||
2731 	     psp->pmfw_centralized_cstate_management))
2732 		return true;
2733 
2734 	if (amdgpu_sriov_vf(psp->adev) &&
2735 	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2736 		return true;
2737 
2738 	if (psp->autoload_supported &&
2739 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2740 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2741 		/* skip mec JT when autoload is enabled */
2742 		return true;
2743 
2744 	return false;
2745 }
2746 
2747 int psp_load_fw_list(struct psp_context *psp,
2748 		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
2749 {
2750 	int ret = 0, i;
2751 	struct amdgpu_firmware_info *ucode;
2752 
2753 	for (i = 0; i < ucode_count; ++i) {
2754 		ucode = ucode_list[i];
2755 		psp_print_fw_hdr(psp, ucode);
2756 		ret = psp_execute_ip_fw_load(psp, ucode);
2757 		if (ret)
2758 			return ret;
2759 	}
2760 	return ret;
2761 }
2762 
2763 static int psp_load_non_psp_fw(struct psp_context *psp)
2764 {
2765 	int i, ret;
2766 	struct amdgpu_firmware_info *ucode;
2767 	struct amdgpu_device *adev = psp->adev;
2768 
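	/* with RLC autoload and no PMFW-centralized cstate management, the
	 * SMU firmware must go first, before any other non-PSP firmware
	 */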
2769 	if (psp->autoload_supported &&
2770 	    !psp->pmfw_centralized_cstate_management) {
2771 		ret = psp_load_smu_fw(psp);
2772 		if (ret)
2773 			return ret;
2774 	}
2775 
2776 	/* Load P2S table first if it's available */
2777 	psp_load_p2s_table(psp);
2778 
2779 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
2780 		ucode = &adev->firmware.ucode[i];
2781 
2782 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2783 		    !fw_load_skip_check(psp, ucode)) {
2784 			ret = psp_load_smu_fw(psp);
2785 			if (ret)
2786 				return ret;
2787 			continue;
2788 		}
2789 
2790 		if (fw_load_skip_check(psp, ucode))
2791 			continue;
2792 
2793 		if (psp->autoload_supported &&
2794 		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2795 			     IP_VERSION(11, 0, 7) ||
2796 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2797 			     IP_VERSION(11, 0, 11) ||
2798 		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2799 			     IP_VERSION(11, 0, 12)) &&
2800 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2801 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2802 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2803 			/* PSP only receives one SDMA fw for sienna_cichlid,
2804 			 * as all four SDMA fw images are the same
2805 			 */
2806 			continue;
2807 
2808 		psp_print_fw_hdr(psp, ucode);
2809 
2810 		ret = psp_execute_ip_fw_load(psp, ucode);
2811 		if (ret)
2812 			return ret;
2813 
2814 		/* Start RLC autoload after the PSP has received all the gfx firmware */
2815 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2816 		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2817 			ret = psp_rlc_autoload_start(psp);
2818 			if (ret) {
2819 				dev_err(adev->dev, "Failed to start rlc autoload\n");
2820 				return ret;
2821 			}
2822 		}
2823 	}
2824 
2825 	return 0;
2826 }
2827 
2828 static int psp_load_fw(struct amdgpu_device *adev)
2829 {
2830 	int ret;
2831 	struct psp_context *psp = &adev->psp;
2832 
2833 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2834 		/* should not destroy ring, only stop */
2835 		psp_ring_stop(psp, PSP_RING_TYPE__KM);
2836 	} else {
2837 		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2838 
2839 		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2840 		if (ret) {
2841 			dev_err(adev->dev, "PSP ring init failed!\n");
2842 			goto failed;
2843 		}
2844 	}
2845 
2846 	ret = psp_hw_start(psp);
2847 	if (ret)
2848 		goto failed;
2849 
2850 	ret = psp_load_non_psp_fw(psp);
2851 	if (ret)
2852 		goto failed1;
2853 
2854 	ret = psp_asd_initialize(psp);
2855 	if (ret) {
2856 		dev_err(adev->dev, "PSP load asd failed!\n");
2857 		goto failed1;
2858 	}
2859 
2860 	ret = psp_rl_load(adev);
2861 	if (ret) {
2862 		dev_err(adev->dev, "PSP load RL failed!\n");
2863 		goto failed1;
2864 	}
2865 
2866 	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2867 		if (adev->gmc.xgmi.num_physical_nodes > 1) {
2868 			ret = psp_xgmi_initialize(psp, false, true);
2869 			/* Warn on XGMI session initialization failure
2870 			 * instead of stopping driver initialization
2871 			 */
2872 			if (ret)
2873 				dev_err(psp->adev->dev,
2874 					"XGMI: Failed to initialize XGMI session\n");
2875 		}
2876 	}
2877 
2878 	if (psp->ta_fw) {
2879 		ret = psp_ras_initialize(psp);
2880 		if (ret)
2881 			dev_err(psp->adev->dev,
2882 				"RAS: Failed to initialize RAS\n");
2883 
2884 		ret = psp_hdcp_initialize(psp);
2885 		if (ret)
2886 			dev_err(psp->adev->dev,
2887 				"HDCP: Failed to initialize HDCP\n");
2888 
2889 		ret = psp_dtm_initialize(psp);
2890 		if (ret)
2891 			dev_err(psp->adev->dev,
2892 				"DTM: Failed to initialize DTM\n");
2893 
2894 		ret = psp_rap_initialize(psp);
2895 		if (ret)
2896 			dev_err(psp->adev->dev,
2897 				"RAP: Failed to initialize RAP\n");
2898 
2899 		ret = psp_securedisplay_initialize(psp);
2900 		if (ret)
2901 			dev_err(psp->adev->dev,
2902 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2903 	}
2904 
2905 	return 0;
2906 
2907 failed1:
2908 	psp_free_shared_bufs(psp);
2909 failed:
2910 	/*
2911 	 * all cleanup jobs (xgmi terminate, ras terminate,
2912 	 * ring destroy, cmd/fence/fw buffer destroy,
2913 	 * psp->cmd destroy) are delayed to psp_hw_fini
2914 	 */
2915 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2916 	return ret;
2917 }
2918 
2919 static int psp_hw_init(void *handle)
2920 {
2921 	int ret;
2922 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2923 
2924 	mutex_lock(&adev->firmware.mutex);
2925 	/*
2926 	 * This sequence is only needed once during hw_init, not on
2927 	 * resume.
2928 	 */
2929 	ret = amdgpu_ucode_init_bo(adev);
2930 	if (ret)
2931 		goto failed;
2932 
2933 	ret = psp_load_fw(adev);
2934 	if (ret) {
2935 		dev_err(adev->dev, "PSP firmware loading failed\n");
2936 		goto failed;
2937 	}
2938 
2939 	mutex_unlock(&adev->firmware.mutex);
2940 	return 0;
2941 
2942 failed:
2943 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2944 	mutex_unlock(&adev->firmware.mutex);
2945 	return -EINVAL;
2946 }
2947 
2948 static int psp_hw_fini(void *handle)
2949 {
2950 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2951 	struct psp_context *psp = &adev->psp;
2952 
2953 	if (psp->ta_fw) {
2954 		psp_ras_terminate(psp);
2955 		psp_securedisplay_terminate(psp);
2956 		psp_rap_terminate(psp);
2957 		psp_dtm_terminate(psp);
2958 		psp_hdcp_terminate(psp);
2959 
2960 		if (adev->gmc.xgmi.num_physical_nodes > 1)
2961 			psp_xgmi_terminate(psp);
2962 	}
2963 
2964 	psp_asd_terminate(psp);
2965 	psp_tmr_terminate(psp);
2966 
2967 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2968 
2969 	return 0;
2970 }
2971 
2972 static int psp_suspend(void *handle)
2973 {
2974 	int ret = 0;
2975 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2976 	struct psp_context *psp = &adev->psp;
2977 
2978 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
2979 	    psp->xgmi_context.context.initialized) {
2980 		ret = psp_xgmi_terminate(psp);
2981 		if (ret) {
2982 			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
2983 			goto out;
2984 		}
2985 	}
2986 
2987 	if (psp->ta_fw) {
2988 		ret = psp_ras_terminate(psp);
2989 		if (ret) {
2990 			dev_err(adev->dev, "Failed to terminate ras ta\n");
2991 			goto out;
2992 		}
2993 		ret = psp_hdcp_terminate(psp);
2994 		if (ret) {
2995 			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
2996 			goto out;
2997 		}
2998 		ret = psp_dtm_terminate(psp);
2999 		if (ret) {
3000 			dev_err(adev->dev, "Failed to terminate dtm ta\n");
3001 			goto out;
3002 		}
3003 		ret = psp_rap_terminate(psp);
3004 		if (ret) {
3005 			dev_err(adev->dev, "Failed to terminate rap ta\n");
3006 			goto out;
3007 		}
3008 		ret = psp_securedisplay_terminate(psp);
3009 		if (ret) {
3010 			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3011 			goto out;
3012 		}
3013 	}
3014 
3015 	ret = psp_asd_terminate(psp);
3016 	if (ret) {
3017 		dev_err(adev->dev, "Failed to terminate asd\n");
3018 		goto out;
3019 	}
3020 
3021 	ret = psp_tmr_terminate(psp);
3022 	if (ret) {
3023 		dev_err(adev->dev, "Failed to terminate tmr\n");
3024 		goto out;
3025 	}
3026 
3027 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3028 	if (ret)
3029 		dev_err(adev->dev, "PSP ring stop failed\n");
3030 
3031 out:
3032 	return ret;
3033 }
3034 
3035 static int psp_resume(void *handle)
3036 {
3037 	int ret;
3038 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3039 	struct psp_context *psp = &adev->psp;
3040 
3041 	dev_info(adev->dev, "PSP is resuming...\n");
3042 
3043 	if (psp->mem_train_ctx.enable_mem_training) {
3044 		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3045 		if (ret) {
3046 			dev_err(adev->dev, "Failed to process memory training!\n");
3047 			return ret;
3048 		}
3049 	}
3050 
3051 	mutex_lock(&adev->firmware.mutex);
3052 
3053 	ret = psp_hw_start(psp);
3054 	if (ret)
3055 		goto failed;
3056 
3057 	ret = psp_load_non_psp_fw(psp);
3058 	if (ret)
3059 		goto failed;
3060 
3061 	ret = psp_asd_initialize(psp);
3062 	if (ret) {
3063 		dev_err(adev->dev, "PSP load asd failed!\n");
3064 		goto failed;
3065 	}
3066 
3067 	ret = psp_rl_load(adev);
3068 	if (ret) {
3069 		dev_err(adev->dev, "PSP load RL failed!\n");
3070 		goto failed;
3071 	}
3072 
3073 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3074 		ret = psp_xgmi_initialize(psp, false, true);
3075 		/* Warn on XGMI session initialization failure
3076 		 * instead of stopping driver initialization
3077 		 */
3078 		if (ret)
3079 			dev_err(psp->adev->dev,
3080 				"XGMI: Failed to initialize XGMI session\n");
3081 	}
3082 
3083 	if (psp->ta_fw) {
3084 		ret = psp_ras_initialize(psp);
3085 		if (ret)
3086 			dev_err(psp->adev->dev,
3087 				"RAS: Failed to initialize RAS\n");
3088 
3089 		ret = psp_hdcp_initialize(psp);
3090 		if (ret)
3091 			dev_err(psp->adev->dev,
3092 				"HDCP: Failed to initialize HDCP\n");
3093 
3094 		ret = psp_dtm_initialize(psp);
3095 		if (ret)
3096 			dev_err(psp->adev->dev,
3097 				"DTM: Failed to initialize DTM\n");
3098 
3099 		ret = psp_rap_initialize(psp);
3100 		if (ret)
3101 			dev_err(psp->adev->dev,
3102 				"RAP: Failed to initialize RAP\n");
3103 
3104 		ret = psp_securedisplay_initialize(psp);
3105 		if (ret)
3106 			dev_err(psp->adev->dev,
3107 				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3108 	}
3109 
3110 	mutex_unlock(&adev->firmware.mutex);
3111 
3112 	return 0;
3113 
3114 failed:
3115 	dev_err(adev->dev, "PSP resume failed\n");
3116 	mutex_unlock(&adev->firmware.mutex);
3117 	return ret;
3118 }
3119 
3120 int psp_gpu_reset(struct amdgpu_device *adev)
3121 {
3122 	int ret;
3123 
3124 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3125 		return 0;
3126 
3127 	mutex_lock(&adev->psp.mutex);
3128 	ret = psp_mode1_reset(&adev->psp);
3129 	mutex_unlock(&adev->psp.mutex);
3130 
3131 	return ret;
3132 }
3133 
3134 int psp_rlc_autoload_start(struct psp_context *psp)
3135 {
3136 	int ret;
3137 	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3138 
3139 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3140 
3141 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3142 				 psp->fence_buf_mc_addr);
3143 
3144 	release_psp_cmd_buf(psp);
3145 
3146 	return ret;
3147 }
3148 
3149 int psp_ring_cmd_submit(struct psp_context *psp,
3150 			uint64_t cmd_buf_mc_addr,
3151 			uint64_t fence_mc_addr,
3152 			int index)
3153 {
3154 	unsigned int psp_write_ptr_reg = 0;
3155 	struct psp_gfx_rb_frame *write_frame;
3156 	struct psp_ring *ring = &psp->km_ring;
3157 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3158 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3159 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3160 	struct amdgpu_device *adev = psp->adev;
3161 	uint32_t ring_size_dw = ring->ring_size / 4;
3162 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3163 
3164 	/* KM (GPCOM) prepare write pointer */
3165 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3166 
3167 	/* Update KM RB frame pointer to new frame */
3168 	/* write_frame ptr increments by size of rb_frame in bytes */
3169 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3170 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3171 		write_frame = ring_buffer_start;
3172 	else
3173 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3174 	/* Check for an invalid write_frame ptr address */
3175 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3176 		dev_err(adev->dev,
3177 			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3178 			ring_buffer_start, ring_buffer_end, write_frame);
3179 		dev_err(adev->dev,
3180 			"write_frame is pointing to address out of bounds\n");
3181 		return -EINVAL;
3182 	}
3183 
3184 	/* Initialize KM RB frame */
3185 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3186 
3187 	/* Update KM RB frame */
3188 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3189 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3190 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3191 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3192 	write_frame->fence_value = index;
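	/* flush HDP so the PSP sees the fully written frame before the write
	 * pointer is advanced
	 */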
3193 	amdgpu_device_flush_hdp(adev, NULL);
3194 
3195 	/* Update the write Pointer in DWORDs */
3196 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3197 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3198 	return 0;
3199 }
3200 
3201 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3202 {
3203 	struct amdgpu_device *adev = psp->adev;
3204 	const struct psp_firmware_header_v1_0 *asd_hdr;
3205 	int err = 0;
3206 
3207 	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
3208 	if (err)
3209 		goto out;
3210 
3211 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3212 	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3213 	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3214 	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3215 	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3216 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3217 	return 0;
3218 out:
3219 	amdgpu_ucode_release(&adev->psp.asd_fw);
3220 	return err;
3221 }
3222 
3223 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3224 {
3225 	struct amdgpu_device *adev = psp->adev;
3226 	const struct psp_firmware_header_v1_0 *toc_hdr;
3227 	int err = 0;
3228 
3229 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
3230 	if (err)
3231 		goto out;
3232 
3233 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3234 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3235 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3236 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3237 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3238 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3239 	return 0;
3240 out:
3241 	amdgpu_ucode_release(&adev->psp.toc_fw);
3242 	return err;
3243 }
3244 
3245 static int parse_sos_bin_descriptor(struct psp_context *psp,
3246 				   const struct psp_fw_bin_desc *desc,
3247 				   const struct psp_firmware_header_v2_0 *sos_hdr)
3248 {
3249 	uint8_t *ucode_start_addr  = NULL;
3250 
3251 	if (!psp || !desc || !sos_hdr)
3252 		return -EINVAL;
3253 
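	/* each descriptor locates one PSP component inside the packed ucode
	 * array; record its version, size and start address
	 */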
3254 	ucode_start_addr  = (uint8_t *)sos_hdr +
3255 			    le32_to_cpu(desc->offset_bytes) +
3256 			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3257 
3258 	switch (desc->fw_type) {
3259 	case PSP_FW_TYPE_PSP_SOS:
3260 		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3261 		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3262 		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3263 		psp->sos.start_addr	   = ucode_start_addr;
3264 		break;
3265 	case PSP_FW_TYPE_PSP_SYS_DRV:
3266 		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3267 		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3268 		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3269 		psp->sys.start_addr        = ucode_start_addr;
3270 		break;
3271 	case PSP_FW_TYPE_PSP_KDB:
3272 		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3273 		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3274 		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3275 		psp->kdb.start_addr        = ucode_start_addr;
3276 		break;
3277 	case PSP_FW_TYPE_PSP_TOC:
3278 		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3279 		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3280 		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3281 		psp->toc.start_addr        = ucode_start_addr;
3282 		break;
3283 	case PSP_FW_TYPE_PSP_SPL:
3284 		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3285 		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3286 		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3287 		psp->spl.start_addr        = ucode_start_addr;
3288 		break;
3289 	case PSP_FW_TYPE_PSP_RL:
3290 		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3291 		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3292 		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3293 		psp->rl.start_addr         = ucode_start_addr;
3294 		break;
3295 	case PSP_FW_TYPE_PSP_SOC_DRV:
3296 		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3297 		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3298 		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3299 		psp->soc_drv.start_addr         = ucode_start_addr;
3300 		break;
3301 	case PSP_FW_TYPE_PSP_INTF_DRV:
3302 		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3303 		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3304 		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3305 		psp->intf_drv.start_addr        = ucode_start_addr;
3306 		break;
3307 	case PSP_FW_TYPE_PSP_DBG_DRV:
3308 		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3309 		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3310 		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3311 		psp->dbg_drv.start_addr         = ucode_start_addr;
3312 		break;
3313 	case PSP_FW_TYPE_PSP_RAS_DRV:
3314 		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3315 		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3316 		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3317 		psp->ras_drv.start_addr         = ucode_start_addr;
3318 		break;
3319 	case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3320 		psp->ipkeymgr_drv.fw_version         = le32_to_cpu(desc->fw_version);
3321 		psp->ipkeymgr_drv.feature_version    = le32_to_cpu(desc->fw_version);
3322 		psp->ipkeymgr_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3323 		psp->ipkeymgr_drv.start_addr         = ucode_start_addr;
3324 		break;
3325 	default:
3326 		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3327 		break;
3328 	}
3329 
3330 	return 0;
3331 }
3332 
3333 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3334 {
3335 	const struct psp_firmware_header_v1_0 *sos_hdr;
3336 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3337 	uint8_t *ucode_array_start_addr;
3338 
3339 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3340 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3341 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3342 
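	/* ASICs other than MP0 v13.0.2, and v13.0.2 parts connected to the
	 * CPU, use the standard SOS/SYS_DRV layout; everything else takes
	 * the auxiliary images from the v1.3 header below
	 */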
3343 	if (adev->gmc.xgmi.connected_to_cpu ||
3344 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3345 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3346 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3347 
3348 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3349 		adev->psp.sys.start_addr = ucode_array_start_addr;
3350 
3351 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3352 		adev->psp.sos.start_addr = ucode_array_start_addr +
3353 				le32_to_cpu(sos_hdr->sos.offset_bytes);
3354 	} else {
3355 		/* Load alternate PSP SOS FW */
3356 		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3357 
3358 		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3359 		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3360 
3361 		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3362 		adev->psp.sys.start_addr = ucode_array_start_addr +
3363 			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3364 
3365 		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3366 		adev->psp.sos.start_addr = ucode_array_start_addr +
3367 			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3368 	}
3369 
3370 	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3371 		dev_warn(adev->dev, "PSP SOS FW not available\n");
3372 		return -EINVAL;
3373 	}
3374 
3375 	return 0;
3376 }
3377 
3378 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3379 {
3380 	struct amdgpu_device *adev = psp->adev;
3381 	const struct psp_firmware_header_v1_0 *sos_hdr;
3382 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3383 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3384 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3385 	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3386 	int err = 0;
3387 	uint8_t *ucode_array_start_addr;
3388 	int fw_index = 0;
3389 
3390 	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
3391 	if (err)
3392 		goto out;
3393 
3394 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3395 	ucode_array_start_addr = (uint8_t *)sos_hdr +
3396 		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3397 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3398 
3399 	switch (sos_hdr->header.header_version_major) {
3400 	case 1:
3401 		err = psp_init_sos_base_fw(adev);
3402 		if (err)
3403 			goto out;
3404 
3405 		if (sos_hdr->header.header_version_minor == 1) {
3406 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3407 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3408 			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3409 					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3410 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3411 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3412 					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3413 		}
3414 		if (sos_hdr->header.header_version_minor == 2) {
3415 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3416 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3417 			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3418 						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3419 		}
3420 		if (sos_hdr->header.header_version_minor == 3) {
3421 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3422 			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3423 			adev->psp.toc.start_addr = ucode_array_start_addr +
3424 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3425 			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3426 			adev->psp.kdb.start_addr = ucode_array_start_addr +
3427 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3428 			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3429 			adev->psp.spl.start_addr = ucode_array_start_addr +
3430 				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3431 			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3432 			adev->psp.rl.start_addr = ucode_array_start_addr +
3433 				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3434 		}
3435 		break;
3436 	case 2:
3437 		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3438 
3439 		if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3440 			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3441 			err = -EINVAL;
3442 			goto out;
3443 		}
3444 
3445 		for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3446 			err = parse_sos_bin_descriptor(psp,
3447 						       &sos_hdr_v2_0->psp_fw_bin[fw_index],
3448 						       sos_hdr_v2_0);
3449 			if (err)
3450 				goto out;
3451 		}
3452 		break;
3453 	default:
3454 		dev_err(adev->dev,
3455 			"unsupported psp sos firmware\n");
3456 		err = -EINVAL;
3457 		goto out;
3458 	}
3459 
3460 	return 0;
3461 out:
3462 	amdgpu_ucode_release(&adev->psp.sos_fw);
3463 
3464 	return err;
3465 }
3466 
3467 static int parse_ta_bin_descriptor(struct psp_context *psp,
3468 				   const struct psp_fw_bin_desc *desc,
3469 				   const struct ta_firmware_header_v2_0 *ta_hdr)
3470 {
3471 	uint8_t *ucode_start_addr  = NULL;
3472 
3473 	if (!psp || !desc || !ta_hdr)
3474 		return -EINVAL;
3475 
3476 	ucode_start_addr  = (uint8_t *)ta_hdr +
3477 			    le32_to_cpu(desc->offset_bytes) +
3478 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3479 
3480 	switch (desc->fw_type) {
3481 	case TA_FW_TYPE_PSP_ASD:
3482 		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3483 		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3484 		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3485 		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3486 		break;
3487 	case TA_FW_TYPE_PSP_XGMI:
3488 		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3489 		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3490 		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3491 		break;
3492 	case TA_FW_TYPE_PSP_RAS:
3493 		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3494 		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3495 		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3496 		break;
3497 	case TA_FW_TYPE_PSP_HDCP:
3498 		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3499 		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3500 		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3501 		break;
3502 	case TA_FW_TYPE_PSP_DTM:
3503 		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3504 		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3505 		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3506 		break;
3507 	case TA_FW_TYPE_PSP_RAP:
3508 		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3509 		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3510 		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3511 		break;
3512 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3513 		psp->securedisplay_context.context.bin_desc.fw_version =
3514 			le32_to_cpu(desc->fw_version);
3515 		psp->securedisplay_context.context.bin_desc.size_bytes =
3516 			le32_to_cpu(desc->size_bytes);
3517 		psp->securedisplay_context.context.bin_desc.start_addr =
3518 			ucode_start_addr;
3519 		break;
3520 	default:
3521 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3522 		break;
3523 	}
3524 
3525 	return 0;
3526 }
3527 
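/*
 * parse_ta_v1_microcode - fill the TA contexts from a legacy v1 TA image,
 * which carries a fixed set of TAs (XGMI, RAS, HDCP, DTM, SECUREDISPLAY)
 * at offsets recorded directly in the header.
 */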
3528 static int parse_ta_v1_microcode(struct psp_context *psp)
3529 {
3530 	const struct ta_firmware_header_v1_0 *ta_hdr;
3531 	struct amdgpu_device *adev = psp->adev;
3532 
3533 	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3534 
3535 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3536 		return -EINVAL;
3537 
3538 	adev->psp.xgmi_context.context.bin_desc.fw_version =
3539 		le32_to_cpu(ta_hdr->xgmi.fw_version);
3540 	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3541 		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3542 	adev->psp.xgmi_context.context.bin_desc.start_addr =
3543 		(uint8_t *)ta_hdr +
3544 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3545 
3546 	adev->psp.ras_context.context.bin_desc.fw_version =
3547 		le32_to_cpu(ta_hdr->ras.fw_version);
3548 	adev->psp.ras_context.context.bin_desc.size_bytes =
3549 		le32_to_cpu(ta_hdr->ras.size_bytes);
3550 	adev->psp.ras_context.context.bin_desc.start_addr =
3551 		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3552 		le32_to_cpu(ta_hdr->ras.offset_bytes);
3553 
3554 	adev->psp.hdcp_context.context.bin_desc.fw_version =
3555 		le32_to_cpu(ta_hdr->hdcp.fw_version);
3556 	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3557 		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3558 	adev->psp.hdcp_context.context.bin_desc.start_addr =
3559 		(uint8_t *)ta_hdr +
3560 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3561 
3562 	adev->psp.dtm_context.context.bin_desc.fw_version =
3563 		le32_to_cpu(ta_hdr->dtm.fw_version);
3564 	adev->psp.dtm_context.context.bin_desc.size_bytes =
3565 		le32_to_cpu(ta_hdr->dtm.size_bytes);
3566 	adev->psp.dtm_context.context.bin_desc.start_addr =
3567 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3568 		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3569 
3570 	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3571 		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3572 	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3573 		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3574 	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3575 		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3576 		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3577 
3578 	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3579 
3580 	return 0;
3581 }
3582 
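/*
 * parse_ta_v2_microcode - walk the descriptor table of a v2 TA image and
 * hand each entry to parse_ta_bin_descriptor(); images claiming more than
 * UCODE_MAX_PSP_PACKAGING entries are rejected.
 */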
3583 static int parse_ta_v2_microcode(struct psp_context *psp)
3584 {
3585 	const struct ta_firmware_header_v2_0 *ta_hdr;
3586 	struct amdgpu_device *adev = psp->adev;
3587 	int err = 0;
3588 	int ta_index = 0;
3589 
3590 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3591 
3592 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3593 		return -EINVAL;
3594 
3595 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3596 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3597 		return -EINVAL;
3598 	}
3599 
3600 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3601 		err = parse_ta_bin_descriptor(psp,
3602 					      &ta_hdr->ta_fw_bin[ta_index],
3603 					      ta_hdr);
3604 		if (err)
3605 			return err;
3606 	}
3607 
3608 	return 0;
3609 }
3610 
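/*
 * psp_init_ta_microcode - request amdgpu/<chip>_ta.bin and parse it
 * according to its header major version; the firmware is released again
 * if parsing fails.
 */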
3611 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3612 {
3613 	const struct common_firmware_header *hdr;
3614 	struct amdgpu_device *adev = psp->adev;
3615 	int err;
3616 
3617 	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
3618 	if (err)
3619 		return err;
3620 
3621 	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3622 	switch (le16_to_cpu(hdr->header_version_major)) {
3623 	case 1:
3624 		err = parse_ta_v1_microcode(psp);
3625 		break;
3626 	case 2:
3627 		err = parse_ta_v2_microcode(psp);
3628 		break;
3629 	default:
3630 		dev_err(adev->dev, "unsupported TA header version\n");
3631 		err = -EINVAL;
3632 	}
3633 
3634 	if (err)
3635 		amdgpu_ucode_release(&adev->psp.ta_fw);
3636 
3637 	return err;
3638 }
3639 
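/*
 * psp_init_cap_microcode - request the SRIOV CAP firmware
 * (amdgpu/<chip>_cap.bin) and register it for loading. A missing CAP image
 * is not treated as an error; other request failures are returned.
 */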
3640 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3641 {
3642 	struct amdgpu_device *adev = psp->adev;
3643 	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3644 	struct amdgpu_firmware_info *info = NULL;
3645 	int err = 0;
3646 
3647 	if (!amdgpu_sriov_vf(adev)) {
3648 		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3649 		return -EINVAL;
3650 	}
3651 
3652 	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
3653 	if (err) {
3654 		if (err == -ENODEV) {
3655 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3656 			err = 0;
3657 			goto out;
3658 		}
		dev_err(adev->dev, "failed to initialize cap microcode\n");
		goto out;
	}
3661 
3662 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3663 	info->ucode_id = AMDGPU_UCODE_ID_CAP;
3664 	info->fw = adev->psp.cap_fw;
3665 	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3666 		adev->psp.cap_fw->data;
3667 	adev->firmware.fw_size += ALIGN(
3668 			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3669 	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3670 	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3671 	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3672 
3673 	return 0;
3674 
3675 out:
3676 	amdgpu_ucode_release(&adev->psp.cap_fw);
3677 	return err;
3678 }
3679 
3680 static int psp_set_clockgating_state(void *handle,
3681 				     enum amd_clockgating_state state)
3682 {
3683 	return 0;
3684 }
3685 
3686 static int psp_set_powergating_state(void *handle,
3687 				     enum amd_powergating_state state)
3688 {
3689 	return 0;
3690 }
3691 
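/* sysfs read handler: report the USB-C PD firmware version known to the PSP */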
3692 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3693 					 struct device_attribute *attr,
3694 					 char *buf)
3695 {
3696 	struct drm_device *ddev = dev_get_drvdata(dev);
3697 	struct amdgpu_device *adev = drm_to_adev(ddev);
3698 	uint32_t fw_ver;
3699 	int ret;
3700 
3701 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		dev_info(adev->dev, "PSP block is not ready yet.\n");
3703 		return -EBUSY;
3704 	}
3705 
3706 	mutex_lock(&adev->psp.mutex);
3707 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3708 	mutex_unlock(&adev->psp.mutex);
3709 
3710 	if (ret) {
3711 		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3712 		return ret;
3713 	}
3714 
3715 	return sysfs_emit(buf, "%x\n", fw_ver);
3716 }
3717 
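/*
 * sysfs write handler: the written string names a firmware blob in the
 * amdgpu/ firmware directory, which is copied into a 1MB-aligned frame
 * buffer allocation and handed to the PSP for the USB-C PD update.
 */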
static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
3722 {
3723 	struct drm_device *ddev = dev_get_drvdata(dev);
3724 	struct amdgpu_device *adev = drm_to_adev(ddev);
3725 	int ret, idx;
3726 	const struct firmware *usbc_pd_fw;
3727 	struct amdgpu_bo *fw_buf_bo = NULL;
3728 	uint64_t fw_pri_mc_addr;
3729 	void *fw_pri_cpu_addr;
3730 
3731 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		dev_err(adev->dev, "PSP block is not ready yet.\n");
3733 		return -EBUSY;
3734 	}
3735 
3736 	if (!drm_dev_enter(ddev, &idx))
3737 		return -ENODEV;
3738 
3739 	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
3740 	if (ret)
3741 		goto fail;
3742 
3743 	/* LFB address which is aligned to 1MB boundary per PSP request */
3744 	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3745 				      AMDGPU_GEM_DOMAIN_VRAM |
3746 				      AMDGPU_GEM_DOMAIN_GTT,
3747 				      &fw_buf_bo, &fw_pri_mc_addr,
3748 				      &fw_pri_cpu_addr);
3749 	if (ret)
3750 		goto rel_buf;
3751 
3752 	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3753 
3754 	mutex_lock(&adev->psp.mutex);
3755 	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3756 	mutex_unlock(&adev->psp.mutex);
3757 
3758 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3759 
3760 rel_buf:
3761 	amdgpu_ucode_release(&usbc_pd_fw);
3762 fail:
3763 	if (ret) {
		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
3765 		count = ret;
3766 	}
3767 
3768 	drm_dev_exit(idx);
3769 	return count;
3770 }
3771 
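/*
 * psp_copy_fw - stage a firmware binary in the PSP private buffer,
 * clearing the full 1MB buffer first; the copy is skipped if the DRM
 * device has already been unplugged.
 */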
3772 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3773 {
3774 	int idx;
3775 
3776 	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3777 		return;
3778 
3779 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3780 	memcpy(psp->fw_pri_buf, start_addr, bin_size);
3781 
3782 	drm_dev_exit(idx);
3783 }
3784 
3785 /**
3786  * DOC: usbc_pd_fw
 * Reading from this file will retrieve the USB-C PD firmware version. Writing
 * the name of a USB-C PD firmware binary (relative to the amdgpu/ firmware
 * directory) to this file will trigger the update process.
3789  */
3790 static DEVICE_ATTR(usbc_pd_fw, 0644,
3791 		   psp_usbc_pd_fw_sysfs_read,
3792 		   psp_usbc_pd_fw_sysfs_write);
3793 
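/* A PSP firmware binary is considered present once a non-zero size was parsed */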
3794 int is_psp_fw_valid(struct psp_bin_desc bin)
3795 {
3796 	return bin.size_bytes;
3797 }
3798 
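/*
 * Writes to psp_vbflash only stage the IFWI image in a kernel buffer;
 * the actual flash is kicked off by a subsequent read
 * (see amdgpu_psp_vbflash_read()).
 */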
3799 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3800 					struct bin_attribute *bin_attr,
3801 					char *buffer, loff_t pos, size_t count)
3802 {
3803 	struct device *dev = kobj_to_dev(kobj);
3804 	struct drm_device *ddev = dev_get_drvdata(dev);
3805 	struct amdgpu_device *adev = drm_to_adev(ddev);
3806 
3807 	adev->psp.vbflash_done = false;
3808 
3809 	/* Safeguard against memory drain */
3810 	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u bytes\n", AMD_VBIOS_FILE_MAX_SIZE_B);
3812 		kvfree(adev->psp.vbflash_tmp_buf);
3813 		adev->psp.vbflash_tmp_buf = NULL;
3814 		adev->psp.vbflash_image_size = 0;
3815 		return -ENOMEM;
3816 	}
3817 
	/* TODO: allocate the maximum size for now; optimize with realloc later if needed */
3819 	if (!adev->psp.vbflash_tmp_buf) {
3820 		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3821 		if (!adev->psp.vbflash_tmp_buf)
3822 			return -ENOMEM;
3823 	}
3824 
3825 	mutex_lock(&adev->psp.mutex);
3826 	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3827 	adev->psp.vbflash_image_size += count;
3828 	mutex_unlock(&adev->psp.mutex);
3829 
3830 	dev_dbg(adev->dev, "IFWI staged for update\n");
3831 
3832 	return count;
3833 }
3834 
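/*
 * Reading psp_vbflash copies the staged image into a VRAM buffer and asks
 * the PSP to update the SPIROM; the staging buffer is freed whether or not
 * the update succeeds.
 */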
3835 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3836 				       struct bin_attribute *bin_attr, char *buffer,
3837 				       loff_t pos, size_t count)
3838 {
3839 	struct device *dev = kobj_to_dev(kobj);
3840 	struct drm_device *ddev = dev_get_drvdata(dev);
3841 	struct amdgpu_device *adev = drm_to_adev(ddev);
3842 	struct amdgpu_bo *fw_buf_bo = NULL;
3843 	uint64_t fw_pri_mc_addr;
3844 	void *fw_pri_cpu_addr;
3845 	int ret;
3846 
3847 	if (adev->psp.vbflash_image_size == 0)
3848 		return -EINVAL;
3849 
3850 	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
3851 
3852 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3853 					AMDGPU_GPU_PAGE_SIZE,
3854 					AMDGPU_GEM_DOMAIN_VRAM,
3855 					&fw_buf_bo,
3856 					&fw_pri_mc_addr,
3857 					&fw_pri_cpu_addr);
3858 	if (ret)
3859 		goto rel_buf;
3860 
3861 	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3862 
3863 	mutex_lock(&adev->psp.mutex);
3864 	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3865 	mutex_unlock(&adev->psp.mutex);
3866 
3867 	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3868 
3869 rel_buf:
3870 	kvfree(adev->psp.vbflash_tmp_buf);
3871 	adev->psp.vbflash_tmp_buf = NULL;
3872 	adev->psp.vbflash_image_size = 0;
3873 
3874 	if (ret) {
3875 		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
3876 		return ret;
3877 	}
3878 
3879 	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
3880 	return 0;
3881 }
3882 
3883 /**
3884  * DOC: psp_vbflash
3885  * Writing to this file will stage an IFWI for update. Reading from this file
3886  * will trigger the update process.
3887  */
3888 static struct bin_attribute psp_vbflash_bin_attr = {
3889 	.attr = {.name = "psp_vbflash", .mode = 0660},
3890 	.size = 0,
3891 	.write = amdgpu_psp_vbflash_write,
3892 	.read = amdgpu_psp_vbflash_read,
3893 };
3894 
3895 /**
3896  * DOC: psp_vbflash_status
3897  * The status of the flash process.
3898  * 0: IFWI flash not complete.
3899  * 1: IFWI flash complete.
3900  */
3901 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3902 					 struct device_attribute *attr,
3903 					 char *buf)
3904 {
3905 	struct drm_device *ddev = dev_get_drvdata(dev);
3906 	struct amdgpu_device *adev = drm_to_adev(ddev);
3907 	uint32_t vbflash_status;
3908 
3909 	vbflash_status = psp_vbflash_status(&adev->psp);
3910 	if (!adev->psp.vbflash_done)
3911 		vbflash_status = 0;
3912 	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
3913 		vbflash_status = 1;
3914 
3915 	return sysfs_emit(buf, "0x%x\n", vbflash_status);
3916 }
3917 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
3918 
3919 static struct bin_attribute *bin_flash_attrs[] = {
3920 	&psp_vbflash_bin_attr,
3921 	NULL
3922 };
3923 
3924 static struct attribute *flash_attrs[] = {
3925 	&dev_attr_psp_vbflash_status.attr,
3926 	&dev_attr_usbc_pd_fw.attr,
3927 	NULL
3928 };
3929 
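/*
 * sysfs visibility callbacks: the USB-C PD and IFWI flash files are only
 * exposed when the PSP reports support for the corresponding update path
 * (sup_pd_fw_up / sup_ifwi_up).
 */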
3930 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3931 {
3932 	struct device *dev = kobj_to_dev(kobj);
3933 	struct drm_device *ddev = dev_get_drvdata(dev);
3934 	struct amdgpu_device *adev = drm_to_adev(ddev);
3935 
3936 	if (attr == &dev_attr_usbc_pd_fw.attr)
3937 		return adev->psp.sup_pd_fw_up ? 0660 : 0;
3938 
3939 	return adev->psp.sup_ifwi_up ? 0440 : 0;
3940 }
3941 
3942 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
3943 						struct bin_attribute *attr,
3944 						int idx)
3945 {
3946 	struct device *dev = kobj_to_dev(kobj);
3947 	struct drm_device *ddev = dev_get_drvdata(dev);
3948 	struct amdgpu_device *adev = drm_to_adev(ddev);
3949 
3950 	return adev->psp.sup_ifwi_up ? 0660 : 0;
3951 }
3952 
3953 const struct attribute_group amdgpu_flash_attr_group = {
3954 	.attrs = flash_attrs,
3955 	.bin_attrs = bin_flash_attrs,
3956 	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
3957 	.is_visible = amdgpu_flash_attr_is_visible,
3958 };
3959 
3960 const struct amd_ip_funcs psp_ip_funcs = {
3961 	.name = "psp",
3962 	.early_init = psp_early_init,
3963 	.late_init = NULL,
3964 	.sw_init = psp_sw_init,
3965 	.sw_fini = psp_sw_fini,
3966 	.hw_init = psp_hw_init,
3967 	.hw_fini = psp_hw_fini,
3968 	.suspend = psp_suspend,
3969 	.resume = psp_resume,
3970 	.is_idle = NULL,
3971 	.check_soft_reset = NULL,
3972 	.wait_for_idle = NULL,
3973 	.soft_reset = NULL,
3974 	.set_clockgating_state = psp_set_clockgating_state,
3975 	.set_powergating_state = psp_set_powergating_state,
3976 };
3977 
3978 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
3979 	.type = AMD_IP_BLOCK_TYPE_PSP,
3980 	.major = 3,
3981 	.minor = 1,
3982 	.rev = 0,
3983 	.funcs = &psp_ip_funcs,
3984 };
3985 
3986 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
3987 	.type = AMD_IP_BLOCK_TYPE_PSP,
3988 	.major = 10,
3989 	.minor = 0,
3990 	.rev = 0,
3991 	.funcs = &psp_ip_funcs,
3992 };
3993 
3994 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
3995 	.type = AMD_IP_BLOCK_TYPE_PSP,
3996 	.major = 11,
3997 	.minor = 0,
3998 	.rev = 0,
3999 	.funcs = &psp_ip_funcs,
4000 };
4001 
4002 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4003 	.type = AMD_IP_BLOCK_TYPE_PSP,
4004 	.major = 11,
4005 	.minor = 0,
4006 	.rev = 8,
4007 	.funcs = &psp_ip_funcs,
4008 };
4009 
4010 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4011 	.type = AMD_IP_BLOCK_TYPE_PSP,
4012 	.major = 12,
4013 	.minor = 0,
4014 	.rev = 0,
4015 	.funcs = &psp_ip_funcs,
4016 };
4017 
4018 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4019 	.type = AMD_IP_BLOCK_TYPE_PSP,
4020 	.major = 13,
4021 	.minor = 0,
4022 	.rev = 0,
4023 	.funcs = &psp_ip_funcs,
4024 };
4025 
4026 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4027 	.type = AMD_IP_BLOCK_TYPE_PSP,
4028 	.major = 13,
4029 	.minor = 0,
4030 	.rev = 4,
4031 	.funcs = &psp_ip_funcs,
4032 };
4033 
4034 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4035 	.type = AMD_IP_BLOCK_TYPE_PSP,
4036 	.major = 14,
4037 	.minor = 0,
4038 	.rev = 0,
4039 	.funcs = &psp_ip_funcs,
4040 };
4041