1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46
47 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
48
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52
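/* Create the PSP KM ring: record the ring type and allocate a 4KB ring
 * buffer in VRAM/GTT, keeping its MC and CPU addresses in psp->km_ring.
 */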
53 static int psp_ring_init(struct psp_context *psp,
54 enum psp_ring_type ring_type)
55 {
56 int ret = 0;
57 struct psp_ring *ring;
58 struct amdgpu_device *adev = psp->adev;
59
60 ring = &psp->km_ring;
61
62 ring->ring_type = ring_type;
63
64 /* allocate 4k Page of Local Frame Buffer memory for ring */
65 ring->ring_size = 0x1000;
66 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 AMDGPU_GEM_DOMAIN_VRAM |
68 AMDGPU_GEM_DOMAIN_GTT,
69 &adev->firmware.rbuf,
70 &ring->ring_mem_mc_addr,
71 (void **)&ring->ring_mem);
72 if (ret) {
73 ring->ring_size = 0;
74 return ret;
75 }
76
77 return 0;
78 }
79
80 /*
81 * Because DF Cstate management is centralized in the PMFW, the firmware
82 * loading sequence is updated as below:
83 * - Load KDB
84 * - Load SYS_DRV
85 * - Load tOS
86 * - Load PMFW
87 * - Setup TMR
88 * - Load other non-psp fw
89 * - Load ASD
90 * - Load XGMI/RAS/HDCP/DTM TA if any
91 *
92 * This new sequence is required for
93 * - Arcturus and onwards
94 */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 struct amdgpu_device *adev = psp->adev;
98
99 if (amdgpu_sriov_vf(adev)) {
100 psp->pmfw_centralized_cstate_management = false;
101 return;
102 }
103
104 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 case IP_VERSION(11, 0, 0):
106 case IP_VERSION(11, 0, 4):
107 case IP_VERSION(11, 0, 5):
108 case IP_VERSION(11, 0, 7):
109 case IP_VERSION(11, 0, 9):
110 case IP_VERSION(11, 0, 11):
111 case IP_VERSION(11, 0, 12):
112 case IP_VERSION(11, 0, 13):
113 case IP_VERSION(13, 0, 0):
114 case IP_VERSION(13, 0, 2):
115 case IP_VERSION(13, 0, 7):
116 psp->pmfw_centralized_cstate_management = true;
117 break;
118 default:
119 psp->pmfw_centralized_cstate_management = false;
120 break;
121 }
122 }
123
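/* Under SRIOV, select which CAP/TA microcode to initialize and which
 * ucode ID is used for autoload, based on the MP0 IP version.
 */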
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 struct amdgpu_device *adev = psp->adev;
127 char ucode_prefix[30];
128 int ret = 0;
129
130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131
132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 case IP_VERSION(9, 0, 0):
134 case IP_VERSION(11, 0, 7):
135 case IP_VERSION(11, 0, 9):
136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 ret = psp_init_cap_microcode(psp, ucode_prefix);
138 break;
139 case IP_VERSION(13, 0, 2):
140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 ret = psp_init_cap_microcode(psp, ucode_prefix);
142 ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 break;
144 case IP_VERSION(13, 0, 0):
145 adev->virt.autoload_ucode_id = 0;
146 break;
147 case IP_VERSION(13, 0, 6):
148 case IP_VERSION(13, 0, 14):
149 ret = psp_init_cap_microcode(psp, ucode_prefix);
150 ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 break;
152 case IP_VERSION(13, 0, 10):
153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 ret = psp_init_cap_microcode(psp, ucode_prefix);
155 break;
156 case IP_VERSION(13, 0, 12):
157 ret = psp_init_ta_microcode(psp, ucode_prefix);
158 break;
159 default:
160 return -EINVAL;
161 }
162 return ret;
163 }
164
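/* Bind the IP-version specific PSP callbacks and set per-ASIC capability
 * flags (autoload support, boot-time TMR, PD/IFWI update support), then
 * initialize the PSP microcode for either the SRIOV or bare-metal path.
 */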
165 static int psp_early_init(struct amdgpu_ip_block *ip_block)
166 {
167 struct amdgpu_device *adev = ip_block->adev;
168 struct psp_context *psp = &adev->psp;
169
170 psp->autoload_supported = true;
171 psp->boot_time_tmr = true;
172
173 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
174 case IP_VERSION(9, 0, 0):
175 psp_v3_1_set_psp_funcs(psp);
176 psp->autoload_supported = false;
177 psp->boot_time_tmr = false;
178 break;
179 case IP_VERSION(10, 0, 0):
180 case IP_VERSION(10, 0, 1):
181 psp_v10_0_set_psp_funcs(psp);
182 psp->autoload_supported = false;
183 psp->boot_time_tmr = false;
184 break;
185 case IP_VERSION(11, 0, 2):
186 case IP_VERSION(11, 0, 4):
187 psp_v11_0_set_psp_funcs(psp);
188 psp->autoload_supported = false;
189 psp->boot_time_tmr = false;
190 break;
191 case IP_VERSION(11, 0, 0):
192 case IP_VERSION(11, 0, 7):
193 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
194 fallthrough;
195 case IP_VERSION(11, 0, 5):
196 case IP_VERSION(11, 0, 9):
197 case IP_VERSION(11, 0, 11):
198 case IP_VERSION(11, 5, 0):
199 case IP_VERSION(11, 5, 2):
200 case IP_VERSION(11, 0, 12):
201 case IP_VERSION(11, 0, 13):
202 psp_v11_0_set_psp_funcs(psp);
203 psp->boot_time_tmr = false;
204 break;
205 case IP_VERSION(11, 0, 3):
206 case IP_VERSION(12, 0, 1):
207 psp_v12_0_set_psp_funcs(psp);
208 psp->autoload_supported = false;
209 psp->boot_time_tmr = false;
210 break;
211 case IP_VERSION(13, 0, 2):
212 psp->boot_time_tmr = false;
213 fallthrough;
214 case IP_VERSION(13, 0, 6):
215 case IP_VERSION(13, 0, 14):
216 psp_v13_0_set_psp_funcs(psp);
217 psp->autoload_supported = false;
218 break;
219 case IP_VERSION(13, 0, 12):
220 psp_v13_0_set_psp_funcs(psp);
221 psp->autoload_supported = false;
222 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
223 break;
224 case IP_VERSION(13, 0, 1):
225 case IP_VERSION(13, 0, 3):
226 case IP_VERSION(13, 0, 5):
227 case IP_VERSION(13, 0, 8):
228 case IP_VERSION(13, 0, 11):
229 case IP_VERSION(14, 0, 0):
230 case IP_VERSION(14, 0, 1):
231 case IP_VERSION(14, 0, 4):
232 psp_v13_0_set_psp_funcs(psp);
233 psp->boot_time_tmr = false;
234 break;
235 case IP_VERSION(11, 0, 8):
236 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
237 psp_v11_0_8_set_psp_funcs(psp);
238 }
239 psp->autoload_supported = false;
240 psp->boot_time_tmr = false;
241 break;
242 case IP_VERSION(13, 0, 0):
243 case IP_VERSION(13, 0, 7):
244 case IP_VERSION(13, 0, 10):
245 psp_v13_0_set_psp_funcs(psp);
246 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
247 psp->boot_time_tmr = false;
248 break;
249 case IP_VERSION(13, 0, 4):
250 psp_v13_0_4_set_psp_funcs(psp);
251 psp->boot_time_tmr = false;
252 break;
253 case IP_VERSION(14, 0, 2):
254 case IP_VERSION(14, 0, 3):
255 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
256 psp_v14_0_set_psp_funcs(psp);
257 break;
258 case IP_VERSION(14, 0, 5):
259 psp_v14_0_set_psp_funcs(psp);
260 psp->boot_time_tmr = false;
261 break;
262 default:
263 return -EINVAL;
264 }
265
266 psp->adev = adev;
267
268 adev->psp_timeout = 20000;
269
270 psp_check_pmfw_centralized_cstate_management(psp);
271
272 if (amdgpu_sriov_vf(adev))
273 return psp_init_sriov_microcode(psp);
274 else
275 return psp_init_microcode(psp);
276 }
277
278 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
279 {
280 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
281 &mem_ctx->shared_buf);
282 mem_ctx->shared_bo = NULL;
283 }
284
285 static void psp_free_shared_bufs(struct psp_context *psp)
286 {
287 void *tmr_buf;
288 void **pptr;
289
290 /* free TMR memory buffer */
291 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
292 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
293 psp->tmr_bo = NULL;
294
295 /* free xgmi shared memory */
296 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
297
298 /* free ras shared memory */
299 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
300
301 /* free hdcp shared memory */
302 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
303
304 /* free dtm shared memory */
305 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
306
307 /* free rap shared memory */
308 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
309
310 /* free securedisplay shared memory */
311 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
312
313
314 }
315
316 static void psp_memory_training_fini(struct psp_context *psp)
317 {
318 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
319
320 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
321 kfree(ctx->sys_cache);
322 ctx->sys_cache = NULL;
323 }
324
325 static int psp_memory_training_init(struct psp_context *psp)
326 {
327 int ret;
328 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
329
330 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
331 dev_dbg(psp->adev->dev, "memory training is not supported!\n");
332 return 0;
333 }
334
335 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
336 if (ctx->sys_cache == NULL) {
337 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
338 ret = -ENOMEM;
339 goto Err_out;
340 }
341
342 dev_dbg(psp->adev->dev,
343 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
344 ctx->train_data_size,
345 ctx->p2c_train_data_offset,
346 ctx->c2p_train_data_offset);
347 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
348 return 0;
349
350 Err_out:
351 psp_memory_training_fini(psp);
352 return ret;
353 }
354
355 /*
356 * Helper function to query a psp runtime database entry
357 *
358 * @adev: amdgpu_device pointer
359 * @entry_type: the type of psp runtime database entry
360 * @db_entry: runtime database entry pointer
361 *
362 * Return false if the runtime database doesn't exist or the entry is invalid,
363 * or true if the specific database entry is found and copied to @db_entry
364 */
365 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
366 enum psp_runtime_entry_type entry_type,
367 void *db_entry)
368 {
369 uint64_t db_header_pos, db_dir_pos;
370 struct psp_runtime_data_header db_header = {0};
371 struct psp_runtime_data_directory db_dir = {0};
372 bool ret = false;
373 int i;
374
375 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
376 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
377 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
378 return false;
379
380 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
381 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
382
383 /* read runtime db header from vram */
384 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
385 sizeof(struct psp_runtime_data_header), false);
386
387 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
388 /* runtime db doesn't exist, exit */
389 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
390 return false;
391 }
392
393 /* read runtime database entry from vram */
394 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
395 sizeof(struct psp_runtime_data_directory), false);
396
397 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
398 /* invalid db entry count, exit */
399 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
400 return false;
401 }
402
403 /* look up for requested entry type */
404 for (i = 0; i < db_dir.entry_count && !ret; i++) {
405 if (db_dir.entry_list[i].entry_type == entry_type) {
406 switch (entry_type) {
407 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
408 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
409 /* invalid db entry size */
410 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
411 return false;
412 }
413 /* read runtime database entry */
414 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
415 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
416 ret = true;
417 break;
418 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
419 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
420 /* invalid db entry size */
421 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
422 return false;
423 }
424 /* read runtime database entry */
425 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
426 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
427 ret = true;
428 break;
429 default:
430 ret = false;
431 break;
432 }
433 }
434 }
435
436 return ret;
437 }
438
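/* Software init: allocate the PSP command buffer, read the SCPM and boot
 * config entries from the PSP runtime database, run two-stage memory
 * training when enabled, and create the fw_pri/fence/cmd buffer objects.
 */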
439 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
440 {
441 struct amdgpu_device *adev = ip_block->adev;
442 struct psp_context *psp = &adev->psp;
443 int ret;
444 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
445 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
446 struct psp_runtime_scpm_entry scpm_entry;
447
448 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
449 if (!psp->cmd) {
450 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
451 return -ENOMEM;
452 }
453
454 adev->psp.xgmi_context.supports_extended_data =
455 !adev->gmc.xgmi.connected_to_cpu &&
456 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
457
458 memset(&scpm_entry, 0, sizeof(scpm_entry));
459 if ((psp_get_runtime_db_entry(adev,
460 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
461 &scpm_entry)) &&
462 (scpm_entry.scpm_status != SCPM_DISABLE)) {
463 adev->scpm_enabled = true;
464 adev->scpm_status = scpm_entry.scpm_status;
465 } else {
466 adev->scpm_enabled = false;
467 adev->scpm_status = SCPM_DISABLE;
468 }
469
470 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
471
472 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
473 if (psp_get_runtime_db_entry(adev,
474 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
475 &boot_cfg_entry)) {
476 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
477 if ((psp->boot_cfg_bitmask) &
478 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
479 /* If psp runtime database exists, then
480 * only enable two stage memory training
481 * when TWO_STAGE_DRAM_TRAINING bit is set
482 * in runtime database
483 */
484 mem_training_ctx->enable_mem_training = true;
485 }
486
487 } else {
488 /* If psp runtime database doesn't exist or is
489 * invalid, force enable two stage memory training
490 */
491 mem_training_ctx->enable_mem_training = true;
492 }
493
494 if (mem_training_ctx->enable_mem_training) {
495 ret = psp_memory_training_init(psp);
496 if (ret) {
497 dev_err(adev->dev, "Failed to initialize memory training!\n");
498 return ret;
499 }
500
501 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
502 if (ret) {
503 dev_err(adev->dev, "Failed to process memory training!\n");
504 return ret;
505 }
506 }
507
508 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
509 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
510 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
511 &psp->fw_pri_bo,
512 &psp->fw_pri_mc_addr,
513 &psp->fw_pri_buf);
514 if (ret)
515 return ret;
516
517 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
518 AMDGPU_GEM_DOMAIN_VRAM |
519 AMDGPU_GEM_DOMAIN_GTT,
520 &psp->fence_buf_bo,
521 &psp->fence_buf_mc_addr,
522 &psp->fence_buf);
523 if (ret)
524 goto failed1;
525
526 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
527 AMDGPU_GEM_DOMAIN_VRAM |
528 AMDGPU_GEM_DOMAIN_GTT,
529 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
530 (void **)&psp->cmd_buf_mem);
531 if (ret)
532 goto failed2;
533
534 return 0;
535
536 failed2:
537 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
538 &psp->fence_buf_mc_addr, &psp->fence_buf);
539 failed1:
540 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
541 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
542 return ret;
543 }
544
545 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
546 {
547 struct amdgpu_device *adev = ip_block->adev;
548 struct psp_context *psp = &adev->psp;
549
550 psp_memory_training_fini(psp);
551
552 amdgpu_ucode_release(&psp->sos_fw);
553 amdgpu_ucode_release(&psp->asd_fw);
554 amdgpu_ucode_release(&psp->ta_fw);
555 amdgpu_ucode_release(&psp->cap_fw);
556 amdgpu_ucode_release(&psp->toc_fw);
557
558 kfree(psp->cmd);
559 psp->cmd = NULL;
560
561 psp_free_shared_bufs(psp);
562
563 if (psp->km_ring.ring_mem)
564 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
565 &psp->km_ring.ring_mem_mc_addr,
566 (void **)&psp->km_ring.ring_mem);
567
568 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
569 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
570 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
571 &psp->fence_buf_mc_addr, &psp->fence_buf);
572 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
573 (void **)&psp->cmd_buf_mem);
574
575 return 0;
576 }
577
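/* Poll a PSP register until (value & mask) == reg_val, or until the value
 * changes when PSP_WAITREG_CHANGED is set, waiting 1us per iteration up to
 * adev->usec_timeout. Returns -ETIME on timeout.
 */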
578 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
579 uint32_t mask, uint32_t flags)
580 {
581 bool check_changed = flags & PSP_WAITREG_CHANGED;
582 bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
583 uint32_t val;
584 int i;
585 struct amdgpu_device *adev = psp->adev;
586
587 if (psp->adev->no_hw_access)
588 return 0;
589
590 for (i = 0; i < adev->usec_timeout; i++) {
591 val = RREG32(reg_index);
592 if (check_changed) {
593 if (val != reg_val)
594 return 0;
595 } else {
596 if ((val & mask) == reg_val)
597 return 0;
598 }
599 udelay(1);
600 }
601
602 if (verbose)
603 dev_err(adev->dev,
604 "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
605 reg_index, mask, val, reg_val);
606
607 return -ETIME;
608 }
609
610 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
611 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
612 {
613 uint32_t val;
614 int i;
615 struct amdgpu_device *adev = psp->adev;
616
617 if (psp->adev->no_hw_access)
618 return 0;
619
620 for (i = 0; i < msec_timeout; i++) {
621 val = RREG32(reg_index);
622 if ((val & mask) == reg_val)
623 return 0;
624 msleep(1);
625 }
626
627 return -ETIME;
628 }
629
630 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
631 {
632 switch (cmd_id) {
633 case GFX_CMD_ID_LOAD_TA:
634 return "LOAD_TA";
635 case GFX_CMD_ID_UNLOAD_TA:
636 return "UNLOAD_TA";
637 case GFX_CMD_ID_INVOKE_CMD:
638 return "INVOKE_CMD";
639 case GFX_CMD_ID_LOAD_ASD:
640 return "LOAD_ASD";
641 case GFX_CMD_ID_SETUP_TMR:
642 return "SETUP_TMR";
643 case GFX_CMD_ID_LOAD_IP_FW:
644 return "LOAD_IP_FW";
645 case GFX_CMD_ID_DESTROY_TMR:
646 return "DESTROY_TMR";
647 case GFX_CMD_ID_SAVE_RESTORE:
648 return "SAVE_RESTORE_IP_FW";
649 case GFX_CMD_ID_SETUP_VMR:
650 return "SETUP_VMR";
651 case GFX_CMD_ID_DESTROY_VMR:
652 return "DESTROY_VMR";
653 case GFX_CMD_ID_PROG_REG:
654 return "PROG_REG";
655 case GFX_CMD_ID_GET_FW_ATTESTATION:
656 return "GET_FW_ATTESTATION";
657 case GFX_CMD_ID_LOAD_TOC:
658 return "ID_LOAD_TOC";
659 case GFX_CMD_ID_AUTOLOAD_RLC:
660 return "AUTOLOAD_RLC";
661 case GFX_CMD_ID_BOOT_CFG:
662 return "BOOT_CFG";
663 case GFX_CMD_ID_CONFIG_SQ_PERFMON:
664 return "CONFIG_SQ_PERFMON";
665 case GFX_CMD_ID_FB_FW_RESERV_ADDR:
666 return "FB_FW_RESERV_ADDR";
667 case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
668 return "FB_FW_RESERV_EXT_ADDR";
669 default:
670 return "UNKNOWN CMD";
671 }
672 }
673
674 static bool psp_err_warn(struct psp_context *psp)
675 {
676 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
677
678 /* This response indicates reg list is already loaded */
679 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
680 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
681 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
682 cmd->resp.status == TEE_ERROR_CANCEL)
683 return false;
684
685 return true;
686 }
687
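/* Copy a GFX command into the ring command buffer, submit it to the PSP and
 * poll the fence buffer for completion. A bad response status is only a
 * warning during init, except under SRIOV or on timeout, where it fails.
 */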
688 static int
689 psp_cmd_submit_buf(struct psp_context *psp,
690 struct amdgpu_firmware_info *ucode,
691 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
692 {
693 int ret;
694 int index;
695 int timeout = psp->adev->psp_timeout;
696 bool ras_intr = false;
697 bool skip_unsupport = false;
698
699 if (psp->adev->no_hw_access)
700 return 0;
701
702 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
703
704 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
705
706 index = atomic_inc_return(&psp->fence_value);
707 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
708 if (ret) {
709 atomic_dec(&psp->fence_value);
710 goto exit;
711 }
712
713 amdgpu_device_invalidate_hdp(psp->adev, NULL);
714 while (*((unsigned int *)psp->fence_buf) != index) {
715 if (--timeout == 0)
716 break;
717 /*
718 * Shouldn't wait for the timeout when err_event_athub occurs,
719 * because the gpu reset thread is triggered and the lock resource
720 * should be released for the psp resume sequence.
721 */
722 ras_intr = amdgpu_ras_intr_triggered();
723 if (ras_intr)
724 break;
725 usleep_range(10, 100);
726 amdgpu_device_invalidate_hdp(psp->adev, NULL);
727 }
728
729 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
730 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
731 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
732
733 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
734
735 /* In some cases, the psp response status is not 0 even though there is no
736 * problem while the command is submitted. Some versions of PSP FW
737 * don't write 0 to that field.
738 * So here we only print a warning instead of an error during psp
739 * initialization, to avoid breaking hw_init, and we don't
740 * return -EINVAL.
741 */
742 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
743 if (ucode)
744 dev_warn(psp->adev->dev,
745 "failed to load ucode %s(0x%X) ",
746 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
747 if (psp_err_warn(psp))
748 dev_warn(
749 psp->adev->dev,
750 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
751 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
752 psp->cmd_buf_mem->cmd_id,
753 psp->cmd_buf_mem->resp.status);
754 /* If any firmware (including CAP) load fails under SRIOV, it should
755 * return failure to stop the VF from initializing.
756 * Also return failure in case of timeout
757 */
758 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
759 ret = -EINVAL;
760 goto exit;
761 }
762 }
763
764 if (ucode) {
765 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
766 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
767 }
768
769 exit:
770 return ret;
771 }
772
773 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
774 {
775 struct psp_gfx_cmd_resp *cmd = psp->cmd;
776
777 mutex_lock(&psp->mutex);
778
779 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
780
781 return cmd;
782 }
783
784 static void release_psp_cmd_buf(struct psp_context *psp)
785 {
786 mutex_unlock(&psp->mutex);
787 }
788
789 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
790 struct psp_gfx_cmd_resp *cmd,
791 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
792 {
793 struct amdgpu_device *adev = psp->adev;
794 uint32_t size = 0;
795 uint64_t tmr_pa = 0;
796
797 if (tmr_bo) {
798 size = amdgpu_bo_size(tmr_bo);
799 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
800 }
801
802 if (amdgpu_sriov_vf(psp->adev))
803 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
804 else
805 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
806 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
807 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
808 cmd->cmd.cmd_setup_tmr.buf_size = size;
809 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
810 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
811 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
812 }
813
814 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
815 uint64_t pri_buf_mc, uint32_t size)
816 {
817 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
818 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
819 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
820 cmd->cmd.cmd_load_toc.toc_size = size;
821 }
822
823 /* Issue the LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
824 static int psp_load_toc(struct psp_context *psp,
825 uint32_t *tmr_size)
826 {
827 int ret;
828 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
829
830 /* Copy toc to psp firmware private buffer */
831 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
832
833 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
834
835 ret = psp_cmd_submit_buf(psp, NULL, cmd,
836 psp->fence_buf_mc_addr);
837 if (!ret)
838 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
839
840 release_psp_cmd_buf(psp);
841
842 return ret;
843 }
844
845 /* Set up Trusted Memory Region */
846 static int psp_tmr_init(struct psp_context *psp)
847 {
848 int ret = 0;
849 int tmr_size;
850 void *tmr_buf;
851 void **pptr;
852
853 /*
854 * According to the HW engineers, the TMR address should be "naturally
855 * aligned", i.e. the start address should be an integer multiple of the TMR size.
856 *
857 * Note: this memory needs to be reserved until the driver
858 * is unloaded.
859 */
860 tmr_size = PSP_TMR_SIZE(psp->adev);
861
862 /* For ASICs that support RLC autoload, psp will parse the toc
863 * and calculate the total TMR size needed
864 */
865 if (!amdgpu_sriov_vf(psp->adev) &&
866 psp->toc.start_addr &&
867 psp->toc.size_bytes &&
868 psp->fw_pri_buf) {
869 ret = psp_load_toc(psp, &tmr_size);
870 if (ret) {
871 dev_err(psp->adev->dev, "Failed to load toc\n");
872 return ret;
873 }
874 }
875
876 if (!psp->tmr_bo && !psp->boot_time_tmr) {
877 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
878 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
879 PSP_TMR_ALIGNMENT,
880 AMDGPU_HAS_VRAM(psp->adev) ?
881 AMDGPU_GEM_DOMAIN_VRAM :
882 AMDGPU_GEM_DOMAIN_GTT,
883 &psp->tmr_bo, &psp->tmr_mc_addr,
884 pptr);
885 }
886 if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
887 psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
888
889 return ret;
890 }
891
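/* ASICs for which the TMR is owned by the host under SRIOV, so the guest
 * skips TMR setup/teardown (see the callers below).
 */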
892 static bool psp_skip_tmr(struct psp_context *psp)
893 {
894 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
895 case IP_VERSION(11, 0, 9):
896 case IP_VERSION(11, 0, 7):
897 case IP_VERSION(13, 0, 2):
898 case IP_VERSION(13, 0, 6):
899 case IP_VERSION(13, 0, 10):
900 case IP_VERSION(13, 0, 12):
901 case IP_VERSION(13, 0, 14):
902 return true;
903 default:
904 return false;
905 }
906 }
907
908 static int psp_tmr_load(struct psp_context *psp)
909 {
910 int ret;
911 struct psp_gfx_cmd_resp *cmd;
912
913 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR;
914 * it is already set up by the host driver.
915 */
916 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
917 return 0;
918
919 cmd = acquire_psp_cmd_buf(psp);
920
921 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
922 if (psp->tmr_bo)
923 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
924 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
925
926 ret = psp_cmd_submit_buf(psp, NULL, cmd,
927 psp->fence_buf_mc_addr);
928
929 release_psp_cmd_buf(psp);
930
931 return ret;
932 }
933
934 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
935 struct psp_gfx_cmd_resp *cmd)
936 {
937 if (amdgpu_sriov_vf(psp->adev))
938 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
939 else
940 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
941 }
942
943 static int psp_tmr_unload(struct psp_context *psp)
944 {
945 int ret;
946 struct psp_gfx_cmd_resp *cmd;
947
948 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
949 * as TMR is not loaded at all
950 */
951 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
952 return 0;
953
954 cmd = acquire_psp_cmd_buf(psp);
955
956 psp_prep_tmr_unload_cmd_buf(psp, cmd);
957 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
958
959 ret = psp_cmd_submit_buf(psp, NULL, cmd,
960 psp->fence_buf_mc_addr);
961
962 release_psp_cmd_buf(psp);
963
964 return ret;
965 }
966
967 static int psp_tmr_terminate(struct psp_context *psp)
968 {
969 return psp_tmr_unload(psp);
970 }
971
972 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
973 uint64_t *output_ptr)
974 {
975 int ret;
976 struct psp_gfx_cmd_resp *cmd;
977
978 if (!output_ptr)
979 return -EINVAL;
980
981 if (amdgpu_sriov_vf(psp->adev))
982 return 0;
983
984 cmd = acquire_psp_cmd_buf(psp);
985
986 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
987
988 ret = psp_cmd_submit_buf(psp, NULL, cmd,
989 psp->fence_buf_mc_addr);
990
991 if (!ret) {
992 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
993 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
994 }
995
996 release_psp_cmd_buf(psp);
997
998 return ret;
999 }
1000
1001 static int psp_get_fw_reservation_info(struct psp_context *psp,
1002 uint32_t cmd_id,
1003 uint64_t *addr,
1004 uint32_t *size)
1005 {
1006 int ret;
1007 uint32_t status;
1008 struct psp_gfx_cmd_resp *cmd;
1009
1010 cmd = acquire_psp_cmd_buf(psp);
1011
1012 cmd->cmd_id = cmd_id;
1013
1014 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1015 psp->fence_buf_mc_addr);
1016 if (ret) {
1017 release_psp_cmd_buf(psp);
1018 return ret;
1019 }
1020
1021 status = cmd->resp.status;
1022 if (status == PSP_ERR_UNKNOWN_COMMAND) {
1023 release_psp_cmd_buf(psp);
1024 *addr = 0;
1025 *size = 0;
1026 return 0;
1027 }
1028
1029 *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
1030 cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
1031 *size = cmd->resp.uresp.fw_reserve_info.reserve_size;
1032
1033 release_psp_cmd_buf(psp);
1034
1035 return 0;
1036 }
1037
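/* Query the PSP for its reserved framebuffer regions (base and extended)
 * and re-reserve them through the memory manager so the driver does not
 * reuse that VRAM. Only applies to MP0 v14.0.2/v14.0.3 on bare metal.
 */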
1038 int psp_update_fw_reservation(struct psp_context *psp)
1039 {
1040 int ret;
1041 uint64_t reserv_addr, reserv_addr_ext;
1042 uint32_t reserv_size, reserv_size_ext;
1043 struct amdgpu_device *adev = psp->adev;
1044
1045 if (amdgpu_sriov_vf(psp->adev))
1046 return 0;
1047
1048 if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) &&
1049 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3)))
1050 return 0;
1051
1052 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
1053 if (ret)
1054 return ret;
1055 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
1056 if (ret)
1057 return ret;
1058
1059 if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
1060 dev_warn(adev->dev, "reserve fw region is not valid!\n");
1061 return 0;
1062 }
1063
1064 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1065
1066 reserv_size = roundup(reserv_size, SZ_1M);
1067
1068 ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
1069 if (ret) {
1070 dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
1071 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1072 return ret;
1073 }
1074
1075 reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
1076
1077 ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
1078 &adev->mman.fw_reserved_memory_extend, NULL);
1079 if (ret) {
1080 dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
1081 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
1082 return ret;
1083 }
1084
1085 return 0;
1086 }
1087
1088 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
1089 {
1090 struct psp_context *psp = &adev->psp;
1091 struct psp_gfx_cmd_resp *cmd;
1092 int ret;
1093
1094 if (amdgpu_sriov_vf(adev))
1095 return 0;
1096
1097 cmd = acquire_psp_cmd_buf(psp);
1098
1099 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1100 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
1101
1102 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1103 if (!ret) {
1104 *boot_cfg =
1105 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1106 }
1107
1108 release_psp_cmd_buf(psp);
1109
1110 return ret;
1111 }
1112
1113 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1114 {
1115 int ret;
1116 struct psp_context *psp = &adev->psp;
1117 struct psp_gfx_cmd_resp *cmd;
1118
1119 if (amdgpu_sriov_vf(adev))
1120 return 0;
1121
1122 cmd = acquire_psp_cmd_buf(psp);
1123
1124 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1125 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1126 cmd->cmd.boot_cfg.boot_config = boot_cfg;
1127 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1128
1129 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1130
1131 release_psp_cmd_buf(psp);
1132
1133 return ret;
1134 }
1135
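/* Load the register list firmware, if present, through the LOAD_IP_FW
 * command with fw_type GFX_FW_TYPE_REG_LIST.
 */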
1136 static int psp_rl_load(struct amdgpu_device *adev)
1137 {
1138 int ret;
1139 struct psp_context *psp = &adev->psp;
1140 struct psp_gfx_cmd_resp *cmd;
1141
1142 if (!is_psp_fw_valid(psp->rl))
1143 return 0;
1144
1145 cmd = acquire_psp_cmd_buf(psp);
1146
1147 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1148 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1149
1150 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1151 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1152 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1153 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1154 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1155
1156 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1157
1158 release_psp_cmd_buf(psp);
1159
1160 return ret;
1161 }
1162
1163 int psp_memory_partition(struct psp_context *psp, int mode)
1164 {
1165 struct psp_gfx_cmd_resp *cmd;
1166 int ret;
1167
1168 if (amdgpu_sriov_vf(psp->adev))
1169 return 0;
1170
1171 cmd = acquire_psp_cmd_buf(psp);
1172
1173 cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1174 cmd->cmd.cmd_memory_part.mode = mode;
1175
1176 dev_info(psp->adev->dev,
1177 "Requesting %d memory partition change through PSP", mode);
1178 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1179 if (ret)
1180 dev_err(psp->adev->dev,
1181 "PSP request failed to change to NPS%d mode\n", mode);
1182
1183 release_psp_cmd_buf(psp);
1184
1185 return ret;
1186 }
1187
1188 int psp_spatial_partition(struct psp_context *psp, int mode)
1189 {
1190 struct psp_gfx_cmd_resp *cmd;
1191 int ret;
1192
1193 if (amdgpu_sriov_vf(psp->adev))
1194 return 0;
1195
1196 cmd = acquire_psp_cmd_buf(psp);
1197
1198 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1199 cmd->cmd.cmd_spatial_part.mode = mode;
1200
1201 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1202 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1203
1204 release_psp_cmd_buf(psp);
1205
1206 return ret;
1207 }
1208
1209 static int psp_asd_initialize(struct psp_context *psp)
1210 {
1211 int ret;
1212
1213 /* If the PSP version doesn't match the ASD version, ASD loading will fail.
1214 * Add a workaround to bypass it for sriov for now.
1215 * TODO: add version check to make it common
1216 */
1217 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1218 return 0;
1219
1220 /* bypass asd if display hardware is not available */
1221 if (!amdgpu_device_has_display_hardware(psp->adev) &&
1222 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1223 return 0;
1224
1225 psp->asd_context.mem_context.shared_mc_addr = 0;
1226 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1227 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1228
1229 ret = psp_ta_load(psp, &psp->asd_context);
1230 if (!ret)
1231 psp->asd_context.initialized = true;
1232
1233 return ret;
1234 }
1235
1236 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1237 uint32_t session_id)
1238 {
1239 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1240 cmd->cmd.cmd_unload_ta.session_id = session_id;
1241 }
1242
1243 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1244 {
1245 int ret;
1246 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1247
1248 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1249
1250 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1251
1252 context->resp_status = cmd->resp.status;
1253
1254 release_psp_cmd_buf(psp);
1255
1256 return ret;
1257 }
1258
1259 static int psp_asd_terminate(struct psp_context *psp)
1260 {
1261 int ret;
1262
1263 if (amdgpu_sriov_vf(psp->adev))
1264 return 0;
1265
1266 if (!psp->asd_context.initialized)
1267 return 0;
1268
1269 ret = psp_ta_unload(psp, &psp->asd_context);
1270 if (!ret)
1271 psp->asd_context.initialized = false;
1272
1273 return ret;
1274 }
1275
1276 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1277 uint32_t id, uint32_t value)
1278 {
1279 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1280 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1281 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1282 }
1283
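/* Ask the PSP to program the register identified by a psp_reg_prog_id to
 * the given value via the PROG_REG command.
 */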
1284 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1285 uint32_t value)
1286 {
1287 struct psp_gfx_cmd_resp *cmd;
1288 int ret = 0;
1289
1290 if (reg >= PSP_REG_LAST)
1291 return -EINVAL;
1292
1293 cmd = acquire_psp_cmd_buf(psp);
1294
1295 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1296 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1297 if (ret)
1298 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1299
1300 release_psp_cmd_buf(psp);
1301
1302 return ret;
1303 }
1304
1305 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1306 uint64_t ta_bin_mc,
1307 struct ta_context *context)
1308 {
1309 cmd->cmd_id = context->ta_load_type;
1310 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1311 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1312 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1313
1314 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1315 lower_32_bits(context->mem_context.shared_mc_addr);
1316 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1317 upper_32_bits(context->mem_context.shared_mc_addr);
1318 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1319 }
1320
1321 int psp_ta_init_shared_buf(struct psp_context *psp,
1322 struct ta_mem_context *mem_ctx)
1323 {
1324 /*
1325 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1326 * physical) for ta to host memory
1327 */
1328 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1329 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1330 AMDGPU_GEM_DOMAIN_GTT,
1331 &mem_ctx->shared_bo,
1332 &mem_ctx->shared_mc_addr,
1333 &mem_ctx->shared_buf);
1334 }
1335
1336 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1337 uint32_t ta_cmd_id,
1338 uint32_t session_id)
1339 {
1340 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1341 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1342 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1343 }
1344
1345 int psp_ta_invoke(struct psp_context *psp,
1346 uint32_t ta_cmd_id,
1347 struct ta_context *context)
1348 {
1349 int ret;
1350 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1351
1352 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1353
1354 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1355 psp->fence_buf_mc_addr);
1356
1357 context->resp_status = cmd->resp.status;
1358
1359 release_psp_cmd_buf(psp);
1360
1361 return ret;
1362 }
1363
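/* Copy a TA binary into the PSP private firmware buffer and issue the load
 * command; on success the session id returned by the PSP is stored in the
 * TA context.
 */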
1364 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1365 {
1366 int ret;
1367 struct psp_gfx_cmd_resp *cmd;
1368
1369 cmd = acquire_psp_cmd_buf(psp);
1370
1371 psp_copy_fw(psp, context->bin_desc.start_addr,
1372 context->bin_desc.size_bytes);
1373
1374 if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
1375 context->mem_context.shared_bo)
1376 context->mem_context.shared_mc_addr =
1377 amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
1378
1379 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1380
1381 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1382 psp->fence_buf_mc_addr);
1383
1384 context->resp_status = cmd->resp.status;
1385
1386 if (!ret)
1387 context->session_id = cmd->resp.session_id;
1388
1389 release_psp_cmd_buf(psp);
1390
1391 return ret;
1392 }
1393
1394 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1395 {
1396 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1397 }
1398
1399 int psp_xgmi_terminate(struct psp_context *psp)
1400 {
1401 int ret;
1402 struct amdgpu_device *adev = psp->adev;
1403
1404 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1405 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1406 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1407 adev->gmc.xgmi.connected_to_cpu))
1408 return 0;
1409
1410 if (!psp->xgmi_context.context.initialized)
1411 return 0;
1412
1413 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1414
1415 psp->xgmi_context.context.initialized = false;
1416
1417 return ret;
1418 }
1419
1420 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1421 {
1422 struct ta_xgmi_shared_memory *xgmi_cmd;
1423 int ret;
1424
1425 if (!psp->ta_fw ||
1426 !psp->xgmi_context.context.bin_desc.size_bytes ||
1427 !psp->xgmi_context.context.bin_desc.start_addr)
1428 return -ENOENT;
1429
1430 if (!load_ta)
1431 goto invoke;
1432
1433 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1434 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1435
1436 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1437 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1438 if (ret)
1439 return ret;
1440 }
1441
1442 /* Load XGMI TA */
1443 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1444 if (!ret)
1445 psp->xgmi_context.context.initialized = true;
1446 else
1447 return ret;
1448
1449 invoke:
1450 /* Initialize XGMI session */
1451 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1452 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1453 xgmi_cmd->flag_extend_link_record = set_extended_data;
1454 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1455
1456 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1457 /* note down the capability flag for XGMI TA */
1458 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1459
1460 return ret;
1461 }
1462
1463 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1464 {
1465 struct ta_xgmi_shared_memory *xgmi_cmd;
1466 int ret;
1467
1468 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1469 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1470
1471 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1472
1473 /* Invoke xgmi ta to get hive id */
1474 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1475 if (ret)
1476 return ret;
1477
1478 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1479
1480 return 0;
1481 }
1482
1483 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1484 {
1485 struct ta_xgmi_shared_memory *xgmi_cmd;
1486 int ret;
1487
1488 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1489 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1490
1491 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1492
1493 /* Invoke xgmi ta to get the node id */
1494 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1495 if (ret)
1496 return ret;
1497
1498 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1499
1500 return 0;
1501 }
1502
1503 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1504 {
1505 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1506 IP_VERSION(13, 0, 2) &&
1507 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1508 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1509 IP_VERSION(13, 0, 6);
1510 }
1511
1512 /*
1513 * Chips that support extended topology information require the driver to
1514 * reflect topology information in the opposite direction. This is
1515 * because the TA has already exceeded its link record limit and if the
1516 * TA holds bi-directional information, the driver would have to do
1517 * multiple fetches instead of just two.
1518 */
1519 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1520 struct psp_xgmi_node_info node_info)
1521 {
1522 struct amdgpu_device *mirror_adev;
1523 struct amdgpu_hive_info *hive;
1524 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1525 uint64_t dst_node_id = node_info.node_id;
1526 uint8_t dst_num_hops = node_info.num_hops;
1527 uint8_t dst_num_links = node_info.num_links;
1528
1529 hive = amdgpu_get_xgmi_hive(psp->adev);
1530 if (WARN_ON(!hive))
1531 return;
1532
1533 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1534 struct psp_xgmi_topology_info *mirror_top_info;
1535 int j;
1536
1537 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1538 continue;
1539
1540 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1541 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1542 if (mirror_top_info->nodes[j].node_id != src_node_id)
1543 continue;
1544
1545 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1546 /*
1547 * prevent re-reflection of a 0 num_links value, since the reflection
1548 * criterion is based on num_hops (direct or indirect).
1550 */
1551 if (dst_num_links)
1552 mirror_top_info->nodes[j].num_links = dst_num_links;
1553
1554 break;
1555 }
1556
1557 break;
1558 }
1559
1560 amdgpu_put_xgmi_hive(hive);
1561 }
1562
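/* Query the XGMI TA for topology information: GET_TOPOLOGY_INFO first for
 * node ids and hop counts, then GET_PEER_LINKS or GET_EXTEND_PEER_LINKS for
 * link counts (and port numbers when supported), reflecting the result to
 * peer devices where required.
 */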
1563 int psp_xgmi_get_topology_info(struct psp_context *psp,
1564 int number_devices,
1565 struct psp_xgmi_topology_info *topology,
1566 bool get_extended_data)
1567 {
1568 struct ta_xgmi_shared_memory *xgmi_cmd;
1569 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1570 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1571 int i;
1572 int ret;
1573
1574 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1575 return -EINVAL;
1576
1577 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1578 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1579 xgmi_cmd->flag_extend_link_record = get_extended_data;
1580
1581 /* Fill in the shared memory with topology information as input */
1582 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1583 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1584 topology_info_input->num_nodes = number_devices;
1585
1586 for (i = 0; i < topology_info_input->num_nodes; i++) {
1587 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1588 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1589 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1590 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1591 }
1592
1593 /* Invoke xgmi ta to get the topology information */
1594 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1595 if (ret)
1596 return ret;
1597
1598 /* Read the output topology information from the shared memory */
1599 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1600 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1601 for (i = 0; i < topology->num_nodes; i++) {
1602 /* extended data will either be 0 or equal to non-extended data */
1603 if (topology_info_output->nodes[i].num_hops)
1604 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1605
1606 /* non-extended data gets everything here so no need to update */
1607 if (!get_extended_data) {
1608 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1609 topology->nodes[i].is_sharing_enabled =
1610 topology_info_output->nodes[i].is_sharing_enabled;
1611 topology->nodes[i].sdma_engine =
1612 topology_info_output->nodes[i].sdma_engine;
1613 }
1614
1615 }
1616
1617 /* Invoke xgmi ta again to get the link information */
1618 if (psp_xgmi_peer_link_info_supported(psp)) {
1619 struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1620 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1621 bool requires_reflection =
1622 (psp->xgmi_context.supports_extended_data &&
1623 get_extended_data) ||
1624 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1625 IP_VERSION(13, 0, 6) ||
1626 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1627 IP_VERSION(13, 0, 14);
1628 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1629 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1630
1631 /* populate the shared output buffer rather than the cmd input buffer
1632 * with node_ids as the input for GET_PEER_LINKS command execution.
1633 * This is required for GET_PEER_LINKS per xgmi ta implementation.
1634 * The same requirement for GET_EXTEND_PEER_LINKS command.
1635 */
1636 if (ta_port_num_support) {
1637 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1638
1639 for (i = 0; i < topology->num_nodes; i++)
1640 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1641
1642 link_extend_info_output->num_nodes = topology->num_nodes;
1643 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1644 } else {
1645 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1646
1647 for (i = 0; i < topology->num_nodes; i++)
1648 link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1649
1650 link_info_output->num_nodes = topology->num_nodes;
1651 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1652 }
1653
1654 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1655 if (ret)
1656 return ret;
1657
1658 for (i = 0; i < topology->num_nodes; i++) {
1659 uint8_t node_num_links = ta_port_num_support ?
1660 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1661 /* accumulate num_links on extended data */
1662 if (get_extended_data) {
1663 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1664 } else {
1665 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1666 topology->nodes[i].num_links : node_num_links;
1667 }
1668 /* populate the connected port num info if supported and available */
1669 if (ta_port_num_support && topology->nodes[i].num_links) {
1670 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1671 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1672 }
1673
1674 /* reflect the topology information for bi-directionality */
1675 if (requires_reflection && topology->nodes[i].num_hops)
1676 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1677 }
1678 }
1679
1680 return 0;
1681 }
1682
1683 int psp_xgmi_set_topology_info(struct psp_context *psp,
1684 int number_devices,
1685 struct psp_xgmi_topology_info *topology)
1686 {
1687 struct ta_xgmi_shared_memory *xgmi_cmd;
1688 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1689 int i;
1690
1691 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1692 return -EINVAL;
1693
1694 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1695 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1696
1697 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1698 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1699 topology_info_input->num_nodes = number_devices;
1700
1701 for (i = 0; i < topology_info_input->num_nodes; i++) {
1702 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1703 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1704 topology_info_input->nodes[i].is_sharing_enabled = 1;
1705 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1706 }
1707
1708 /* Invoke xgmi ta to set topology information */
1709 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1710 }
1711
1712 // ras begin
1713 static void psp_ras_ta_check_status(struct psp_context *psp)
1714 {
1715 struct ta_ras_shared_memory *ras_cmd =
1716 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1717
1718 switch (ras_cmd->ras_status) {
1719 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1720 dev_warn(psp->adev->dev,
1721 "RAS WARNING: cmd failed due to unsupported ip\n");
1722 break;
1723 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1724 dev_warn(psp->adev->dev,
1725 "RAS WARNING: cmd failed due to unsupported error injection\n");
1726 break;
1727 case TA_RAS_STATUS__SUCCESS:
1728 break;
1729 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1730 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1731 dev_warn(psp->adev->dev,
1732 "RAS WARNING: Inject error to critical region is not allowed\n");
1733 break;
1734 default:
1735 dev_warn(psp->adev->dev,
1736 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1737 break;
1738 }
1739 }
1740
1741 static int psp_ras_send_cmd(struct psp_context *psp,
1742 enum ras_command cmd_id, void *in, void *out)
1743 {
1744 struct ta_ras_shared_memory *ras_cmd;
1745 uint32_t cmd = cmd_id;
1746 int ret = 0;
1747
1748 if (!in)
1749 return -EINVAL;
1750
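/* The RAS TA shared buffer is reused for every command; serialize access
 * with the context mutex and clear it before filling in the new request.
 */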
1751 mutex_lock(&psp->ras_context.mutex);
1752 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1753 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1754
1755 switch (cmd) {
1756 case TA_RAS_COMMAND__ENABLE_FEATURES:
1757 case TA_RAS_COMMAND__DISABLE_FEATURES:
1758 memcpy(&ras_cmd->ras_in_message,
1759 in, sizeof(ras_cmd->ras_in_message));
1760 break;
1761 case TA_RAS_COMMAND__TRIGGER_ERROR:
1762 memcpy(&ras_cmd->ras_in_message.trigger_error,
1763 in, sizeof(ras_cmd->ras_in_message.trigger_error));
1764 break;
1765 case TA_RAS_COMMAND__QUERY_ADDRESS:
1766 memcpy(&ras_cmd->ras_in_message.address,
1767 in, sizeof(ras_cmd->ras_in_message.address));
1768 break;
1769 default:
1770 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1771 ret = -EINVAL;
1772 goto err_out;
1773 }
1774
1775 ras_cmd->cmd_id = cmd;
1776 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1777
1778 switch (cmd) {
1779 case TA_RAS_COMMAND__TRIGGER_ERROR:
1780 if (!ret && out)
1781 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1782 break;
1783 case TA_RAS_COMMAND__QUERY_ADDRESS:
1784 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1785 ret = -EINVAL;
1786 else if (out)
1787 memcpy(out,
1788 &ras_cmd->ras_out_message.address,
1789 sizeof(ras_cmd->ras_out_message.address));
1790 break;
1791 default:
1792 break;
1793 }
1794
1795 err_out:
1796 mutex_unlock(&psp->ras_context.mutex);
1797
1798 return ret;
1799 }
1800
1801 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1802 {
1803 struct ta_ras_shared_memory *ras_cmd;
1804 int ret;
1805
1806 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1807
1808 /*
1809 * TODO: bypass the loading in sriov for now
1810 */
1811 if (amdgpu_sriov_vf(psp->adev))
1812 return 0;
1813
1814 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1815
1816 if (amdgpu_ras_intr_triggered())
1817 return ret;
1818
1819 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1820 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1821 return -EINVAL;
1822 }
1823
1824 if (!ret) {
1825 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1826 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1827
1828 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1829 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1830 dev_warn(psp->adev->dev,
1831 "RAS internal register access blocked\n");
1832
1833 psp_ras_ta_check_status(psp);
1834 }
1835
1836 return ret;
1837 }
1838
1839 int psp_ras_enable_features(struct psp_context *psp,
1840 union ta_ras_cmd_input *info, bool enable)
1841 {
1842 enum ras_command cmd_id;
1843 int ret;
1844
1845 if (!psp->ras_context.context.initialized || !info)
1846 return -EINVAL;
1847
1848 cmd_id = enable ?
1849 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1850 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1851 if (ret)
1852 return -EINVAL;
1853
1854 return 0;
1855 }
1856
1857 int psp_ras_terminate(struct psp_context *psp)
1858 {
1859 int ret;
1860
1861 /*
1862 * TODO: bypass the terminate in sriov for now
1863 */
1864 if (amdgpu_sriov_vf(psp->adev))
1865 return 0;
1866
1867 if (!psp->ras_context.context.initialized)
1868 return 0;
1869
1870 ret = psp_ta_unload(psp, &psp->ras_context.context);
1871
1872 psp->ras_context.context.initialized = false;
1873
1874 mutex_destroy(&psp->ras_context.mutex);
1875
1876 return ret;
1877 }
1878
1879 int psp_ras_initialize(struct psp_context *psp)
1880 {
1881 int ret;
1882 uint32_t boot_cfg = 0xFF;
1883 struct amdgpu_device *adev = psp->adev;
1884 struct ta_ras_shared_memory *ras_cmd;
1885
1886 /*
1887 * TODO: bypass the initialize in sriov for now
1888 */
1889 if (amdgpu_sriov_vf(adev))
1890 return 0;
1891
1892 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1893 !adev->psp.ras_context.context.bin_desc.start_addr) {
1894 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1895 return 0;
1896 }
1897
1898 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1899 /* query GECC enablement status from boot config
1900 * boot_cfg: 1 = GECC enabled, 0 = GECC disabled
1901 */
1902 ret = psp_boot_config_get(adev, &boot_cfg);
1903 if (ret)
1904 dev_warn(adev->dev, "PSP get boot config failed\n");
1905
1906 if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
1907 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1908 dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
1909 dev_warn(adev->dev,
1910 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
1911 } else {
1912 if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
1913 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1914 if (boot_cfg == 1) {
1915 dev_info(adev->dev, "GECC is enabled\n");
1916 } else {
1917 /* enable GECC in the next boot cycle if it is disabled
1918 * in the boot config, or force-enable GECC if we failed
1919 * to get the boot configuration
1920 */
1921 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1922 if (ret)
1923 dev_warn(adev->dev, "PSP set boot config failed\n");
1924 else
1925 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1926 }
1927 } else {
1928 if (!boot_cfg) {
1929 if (!adev->ras_default_ecc_enabled &&
1930 amdgpu_ras_enable != 1 &&
1931 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1932 dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
1933 else
1934 dev_info(adev->dev, "GECC is disabled\n");
1935 } else {
1936 /* disable GECC in the next boot cycle if ras is
1937 * disabled by the module parameters amdgpu_ras_enable
1938 * and/or amdgpu_ras_mask, or if the boot_config_get
1939 * call failed
1940 */
1941 ret = psp_boot_config_set(adev, 0);
1942 if (ret)
1943 dev_warn(adev->dev, "PSP set boot config failed\n");
1944 else
1945 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1946 }
1947 }
1948 }
1949 }
1950
1951 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1952 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1953
1954 if (!psp->ras_context.context.mem_context.shared_buf) {
1955 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1956 if (ret)
1957 return ret;
1958 }
1959
1960 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1961 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1962
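/* Describe the platform to the RAS TA before loading it: poison mode,
 * dGPU mode, XCC mask, disabled channel count, NPS mode and the active
 * UMC mask.
 */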
1963 if (amdgpu_ras_is_poison_mode_supported(adev))
1964 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1965 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1966 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1967 ras_cmd->ras_in_message.init_flags.xcc_mask =
1968 adev->gfx.xcc_mask;
1969 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1970 if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1971 ras_cmd->ras_in_message.init_flags.nps_mode =
1972 adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1973 ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
1974
1975 ret = psp_ta_load(psp, &psp->ras_context.context);
1976
1977 if (!ret && !ras_cmd->ras_status) {
1978 psp->ras_context.context.initialized = true;
1979 mutex_init(&psp->ras_context.mutex);
1980 } else {
1981 if (ras_cmd->ras_status)
1982 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1983
1984 /* fail to load RAS TA */
1985 psp->ras_context.context.initialized = false;
1986 }
1987
1988 return ret;
1989 }
1990
1991 int psp_ras_trigger_error(struct psp_context *psp,
1992 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1993 {
1994 struct amdgpu_device *adev = psp->adev;
1995 int ret;
1996 uint32_t dev_mask;
1997 uint32_t ras_status = 0;
1998
1999 if (!psp->ras_context.context.initialized || !info)
2000 return -EINVAL;
2001
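/* Map the caller's logical instance mask to the device instance mask for
 * the target block (JPEG reuses the VCN instances).
 */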
2002 switch (info->block_id) {
2003 case TA_RAS_BLOCK__GFX:
2004 dev_mask = GET_MASK(GC, instance_mask);
2005 break;
2006 case TA_RAS_BLOCK__SDMA:
2007 dev_mask = GET_MASK(SDMA0, instance_mask);
2008 break;
2009 case TA_RAS_BLOCK__VCN:
2010 case TA_RAS_BLOCK__JPEG:
2011 dev_mask = GET_MASK(VCN, instance_mask);
2012 break;
2013 default:
2014 dev_mask = instance_mask;
2015 break;
2016 }
2017
2018 /* reuse sub_block_index for backward compatibility */
2019 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
2020 dev_mask &= AMDGPU_RAS_INST_MASK;
2021 info->sub_block_index |= dev_mask;
2022
2023 ret = psp_ras_send_cmd(psp,
2024 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
2025 if (ret)
2026 return -EINVAL;
2027
2028 /* If err_event_athub occurs, error injection was successful; however,
2029 * the return status from the TA is no longer reliable
2030 */
2031 if (amdgpu_ras_intr_triggered())
2032 return 0;
2033
2034 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
2035 return -EACCES;
2036 else if (ras_status)
2037 return -EINVAL;
2038
2039 return 0;
2040 }
2041
2042 int psp_ras_query_address(struct psp_context *psp,
2043 struct ta_ras_query_address_input *addr_in,
2044 struct ta_ras_query_address_output *addr_out)
2045 {
2046 int ret;
2047
2048 if (!psp->ras_context.context.initialized ||
2049 !addr_in || !addr_out)
2050 return -EINVAL;
2051
2052 ret = psp_ras_send_cmd(psp,
2053 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
2054
2055 return ret;
2056 }
2057 // ras end
2058
2059 // HDCP start
2060 static int psp_hdcp_initialize(struct psp_context *psp)
2061 {
2062 int ret;
2063
2064 /*
2065 * TODO: bypass the initialize in sriov for now
2066 */
2067 if (amdgpu_sriov_vf(psp->adev))
2068 return 0;
2069
2070 /* bypass hdcp initialization if dmu is harvested */
2071 if (!amdgpu_device_has_display_hardware(psp->adev))
2072 return 0;
2073
2074 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
2075 !psp->hdcp_context.context.bin_desc.start_addr) {
2076 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
2077 return 0;
2078 }
2079
2080 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
2081 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2082
2083 if (!psp->hdcp_context.context.mem_context.shared_buf) {
2084 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
2085 if (ret)
2086 return ret;
2087 }
2088
2089 ret = psp_ta_load(psp, &psp->hdcp_context.context);
2090 if (!ret) {
2091 psp->hdcp_context.context.initialized = true;
2092 mutex_init(&psp->hdcp_context.mutex);
2093 }
2094
2095 return ret;
2096 }
2097
2098 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2099 {
2100 /*
2101 * TODO: bypass the loading in sriov for now
2102 */
2103 if (amdgpu_sriov_vf(psp->adev))
2104 return 0;
2105
2106 if (!psp->hdcp_context.context.initialized)
2107 return 0;
2108
2109 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
2110 }
2111
2112 static int psp_hdcp_terminate(struct psp_context *psp)
2113 {
2114 int ret;
2115
2116 /*
2117 * TODO: bypass the terminate in sriov for now
2118 */
2119 if (amdgpu_sriov_vf(psp->adev))
2120 return 0;
2121
2122 if (!psp->hdcp_context.context.initialized)
2123 return 0;
2124
2125 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2126
2127 psp->hdcp_context.context.initialized = false;
2128
2129 return ret;
2130 }
2131 // HDCP end
2132
2133 // DTM start
2134 static int psp_dtm_initialize(struct psp_context *psp)
2135 {
2136 int ret;
2137
2138 /*
2139 * TODO: bypass the initialize in sriov for now
2140 */
2141 if (amdgpu_sriov_vf(psp->adev))
2142 return 0;
2143
2144 /* bypass dtm initialization if dmu is harvested */
2145 if (!amdgpu_device_has_display_hardware(psp->adev))
2146 return 0;
2147
2148 if (!psp->dtm_context.context.bin_desc.size_bytes ||
2149 !psp->dtm_context.context.bin_desc.start_addr) {
2150 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2151 return 0;
2152 }
2153
2154 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2155 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2156
2157 if (!psp->dtm_context.context.mem_context.shared_buf) {
2158 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2159 if (ret)
2160 return ret;
2161 }
2162
2163 ret = psp_ta_load(psp, &psp->dtm_context.context);
2164 if (!ret) {
2165 psp->dtm_context.context.initialized = true;
2166 mutex_init(&psp->dtm_context.mutex);
2167 }
2168
2169 return ret;
2170 }
2171
2172 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2173 {
2174 /*
2175 * TODO: bypass the loading in sriov for now
2176 */
2177 if (amdgpu_sriov_vf(psp->adev))
2178 return 0;
2179
2180 if (!psp->dtm_context.context.initialized)
2181 return 0;
2182
2183 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2184 }
2185
2186 static int psp_dtm_terminate(struct psp_context *psp)
2187 {
2188 int ret;
2189
2190 /*
2191 * TODO: bypass the terminate in sriov for now
2192 */
2193 if (amdgpu_sriov_vf(psp->adev))
2194 return 0;
2195
2196 if (!psp->dtm_context.context.initialized)
2197 return 0;
2198
2199 ret = psp_ta_unload(psp, &psp->dtm_context.context);
2200
2201 psp->dtm_context.context.initialized = false;
2202
2203 return ret;
2204 }
2205 // DTM end
2206
2207 // RAP start
2208 static int psp_rap_initialize(struct psp_context *psp)
2209 {
2210 int ret;
2211 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2212
2213 /*
2214 * TODO: bypass the initialize in sriov for now
2215 */
2216 if (amdgpu_sriov_vf(psp->adev))
2217 return 0;
2218
2219 if (!psp->rap_context.context.bin_desc.size_bytes ||
2220 !psp->rap_context.context.bin_desc.start_addr) {
2221 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2222 return 0;
2223 }
2224
2225 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2226 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2227
2228 if (!psp->rap_context.context.mem_context.shared_buf) {
2229 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2230 if (ret)
2231 return ret;
2232 }
2233
2234 ret = psp_ta_load(psp, &psp->rap_context.context);
2235 if (!ret) {
2236 psp->rap_context.context.initialized = true;
2237 mutex_init(&psp->rap_context.mutex);
2238 } else
2239 return ret;
2240
2241 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2242 if (ret || status != TA_RAP_STATUS__SUCCESS) {
2243 psp_rap_terminate(psp);
2244 /* free rap shared memory */
2245 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2246
2247 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2248 ret, status);
2249
2250 return ret;
2251 }
2252
2253 return 0;
2254 }
2255
2256 static int psp_rap_terminate(struct psp_context *psp)
2257 {
2258 int ret;
2259
2260 if (!psp->rap_context.context.initialized)
2261 return 0;
2262
2263 ret = psp_ta_unload(psp, &psp->rap_context.context);
2264
2265 psp->rap_context.context.initialized = false;
2266
2267 return ret;
2268 }
2269
2270 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2271 {
2272 struct ta_rap_shared_memory *rap_cmd;
2273 int ret = 0;
2274
2275 if (!psp->rap_context.context.initialized)
2276 return 0;
2277
2278 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2279 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2280 return -EINVAL;
2281
2282 mutex_lock(&psp->rap_context.mutex);
2283
2284 rap_cmd = (struct ta_rap_shared_memory *)
2285 psp->rap_context.context.mem_context.shared_buf;
2286 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2287
2288 rap_cmd->cmd_id = ta_cmd_id;
2289 rap_cmd->validation_method_id = METHOD_A;
2290
2291 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2292 if (ret)
2293 goto out_unlock;
2294
2295 if (status)
2296 *status = rap_cmd->rap_status;
2297
2298 out_unlock:
2299 mutex_unlock(&psp->rap_context.mutex);
2300
2301 return ret;
2302 }
2303 // RAP end
2304
2305 /* securedisplay start */
2306 static int psp_securedisplay_initialize(struct psp_context *psp)
2307 {
2308 int ret;
2309 struct ta_securedisplay_cmd *securedisplay_cmd;
2310
2311 /*
2312 * TODO: bypass the initialize in sriov for now
2313 */
2314 if (amdgpu_sriov_vf(psp->adev))
2315 return 0;
2316
2317 /* bypass securedisplay initialization if dmu is harvested */
2318 if (!amdgpu_device_has_display_hardware(psp->adev))
2319 return 0;
2320
2321 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2322 !psp->securedisplay_context.context.bin_desc.start_addr) {
2323 dev_info(psp->adev->dev,
2324 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
2325 return 0;
2326 }
2327
2328 psp->securedisplay_context.context.mem_context.shared_mem_size =
2329 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2330 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2331
2332 if (!psp->securedisplay_context.context.initialized) {
2333 ret = psp_ta_init_shared_buf(psp,
2334 &psp->securedisplay_context.context.mem_context);
2335 if (ret)
2336 return ret;
2337 }
2338
2339 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2340 if (!ret) {
2341 psp->securedisplay_context.context.initialized = true;
2342 mutex_init(&psp->securedisplay_context.mutex);
2343 } else
2344 return ret;
2345
2346 mutex_lock(&psp->securedisplay_context.mutex);
2347
2348 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2349 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2350
2351 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2352
2353 mutex_unlock(&psp->securedisplay_context.mutex);
2354
2355 if (ret) {
2356 psp_securedisplay_terminate(psp);
2357 /* free securedisplay shared memory */
2358 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2359 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2360 return -EINVAL;
2361 }
2362
2363 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2364 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2365 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2366 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2367 /* don't try again */
2368 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2369 }
2370
2371 return 0;
2372 }
2373
2374 static int psp_securedisplay_terminate(struct psp_context *psp)
2375 {
2376 int ret;
2377
2378 /*
2379 * TODO: bypass the terminate in sriov for now
2380 */
2381 if (amdgpu_sriov_vf(psp->adev))
2382 return 0;
2383
2384 if (!psp->securedisplay_context.context.initialized)
2385 return 0;
2386
2387 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2388
2389 psp->securedisplay_context.context.initialized = false;
2390
2391 return ret;
2392 }
2393
2394 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2395 {
2396 int ret;
2397
2398 if (!psp->securedisplay_context.context.initialized)
2399 return -EINVAL;
2400
2401 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2402 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2403 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2404 return -EINVAL;
2405
2406 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2407
2408 return ret;
2409 }
2410 /* SECUREDISPLAY end */
2411
2412 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2413 {
2414 struct psp_context *psp = &adev->psp;
2415 int ret = 0;
2416
2417 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2418 ret = psp->funcs->wait_for_bootloader(psp);
2419
2420 return ret;
2421 }
2422
2423 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2424 {
2425 if (psp->funcs &&
2426 psp->funcs->get_ras_capability) {
2427 return psp->funcs->get_ras_capability(psp);
2428 } else {
2429 return false;
2430 }
2431 }
2432
2433 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2434 {
2435 struct psp_context *psp = &adev->psp;
2436
2437 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2438 return false;
2439
2440 if (psp->funcs && psp->funcs->is_reload_needed)
2441 return psp->funcs->is_reload_needed(psp);
2442
2443 return false;
2444 }
2445
2446 static void psp_update_gpu_addresses(struct amdgpu_device *adev)
2447 {
2448 struct psp_context *psp = &adev->psp;
2449
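/* Recompute the MC addresses of the PSP buffers from the framebuffer
 * aperture; called from psp_hw_start when XGMI migration is enabled.
 */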
2450 if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
2451 psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
2452 psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
2453 psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
2454 }
2455 if (adev->firmware.rbuf && psp->km_ring.ring_mem)
2456 psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
2457 }
2458
2459 static int psp_hw_start(struct psp_context *psp)
2460 {
2461 struct amdgpu_device *adev = psp->adev;
2462 int ret;
2463
2464 if (amdgpu_virt_xgmi_migrate_enabled(adev))
2465 psp_update_gpu_addresses(adev);
2466
2467 if (!amdgpu_sriov_vf(adev)) {
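/* On bare metal, walk the PSP bootloader components in order (KDB, SPL,
 * SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV, IPKEYMGR_DRV, SPDM_DRV
 * and finally SOS), loading each one only if both the firmware image and
 * the per-ASIC callback are present.
 */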
2468 if ((is_psp_fw_valid(psp->kdb)) &&
2469 (psp->funcs->bootloader_load_kdb != NULL)) {
2470 ret = psp_bootloader_load_kdb(psp);
2471 if (ret) {
2472 dev_err(adev->dev, "PSP load kdb failed!\n");
2473 return ret;
2474 }
2475 }
2476
2477 if ((is_psp_fw_valid(psp->spl)) &&
2478 (psp->funcs->bootloader_load_spl != NULL)) {
2479 ret = psp_bootloader_load_spl(psp);
2480 if (ret) {
2481 dev_err(adev->dev, "PSP load spl failed!\n");
2482 return ret;
2483 }
2484 }
2485
2486 if ((is_psp_fw_valid(psp->sys)) &&
2487 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2488 ret = psp_bootloader_load_sysdrv(psp);
2489 if (ret) {
2490 dev_err(adev->dev, "PSP load sys drv failed!\n");
2491 return ret;
2492 }
2493 }
2494
2495 if ((is_psp_fw_valid(psp->soc_drv)) &&
2496 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2497 ret = psp_bootloader_load_soc_drv(psp);
2498 if (ret) {
2499 dev_err(adev->dev, "PSP load soc drv failed!\n");
2500 return ret;
2501 }
2502 }
2503
2504 if ((is_psp_fw_valid(psp->intf_drv)) &&
2505 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2506 ret = psp_bootloader_load_intf_drv(psp);
2507 if (ret) {
2508 dev_err(adev->dev, "PSP load intf drv failed!\n");
2509 return ret;
2510 }
2511 }
2512
2513 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2514 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2515 ret = psp_bootloader_load_dbg_drv(psp);
2516 if (ret) {
2517 dev_err(adev->dev, "PSP load dbg drv failed!\n");
2518 return ret;
2519 }
2520 }
2521
2522 if ((is_psp_fw_valid(psp->ras_drv)) &&
2523 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2524 ret = psp_bootloader_load_ras_drv(psp);
2525 if (ret) {
2526 dev_err(adev->dev, "PSP load ras_drv failed!\n");
2527 return ret;
2528 }
2529 }
2530
2531 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2532 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2533 ret = psp_bootloader_load_ipkeymgr_drv(psp);
2534 if (ret) {
2535 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2536 return ret;
2537 }
2538 }
2539
2540 if ((is_psp_fw_valid(psp->spdm_drv)) &&
2541 (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2542 ret = psp_bootloader_load_spdm_drv(psp);
2543 if (ret) {
2544 dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2545 return ret;
2546 }
2547 }
2548
2549 if ((is_psp_fw_valid(psp->sos)) &&
2550 (psp->funcs->bootloader_load_sos != NULL)) {
2551 ret = psp_bootloader_load_sos(psp);
2552 if (ret) {
2553 dev_err(adev->dev, "PSP load sos failed!\n");
2554 return ret;
2555 }
2556 }
2557 }
2558
2559 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2560 if (ret) {
2561 dev_err(adev->dev, "PSP create ring failed!\n");
2562 return ret;
2563 }
2564
2565 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2566 ret = psp_update_fw_reservation(psp);
2567 if (ret) {
2568 dev_err(adev->dev, "update fw reservation failed!\n");
2569 return ret;
2570 }
2571 }
2572
2573 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2574 goto skip_pin_bo;
2575
2576 if (!psp->boot_time_tmr || psp->autoload_supported) {
2577 ret = psp_tmr_init(psp);
2578 if (ret) {
2579 dev_err(adev->dev, "PSP tmr init failed!\n");
2580 return ret;
2581 }
2582 }
2583
2584 skip_pin_bo:
2585 /*
2586 * For ASICs with DF Cstate management centralized
2587 * to PMFW, TMR setup should be performed after PMFW is
2588 * loaded and before other non-psp firmware is loaded.
2589 */
2590 if (psp->pmfw_centralized_cstate_management) {
2591 ret = psp_load_smu_fw(psp);
2592 if (ret)
2593 return ret;
2594 }
2595
2596 if (!psp->boot_time_tmr || !psp->autoload_supported) {
2597 ret = psp_tmr_load(psp);
2598 if (ret) {
2599 dev_err(adev->dev, "PSP load tmr failed!\n");
2600 return ret;
2601 }
2602 }
2603
2604 return 0;
2605 }
2606
2607 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2608 enum psp_gfx_fw_type *type)
2609 {
2610 switch (ucode->ucode_id) {
2611 case AMDGPU_UCODE_ID_CAP:
2612 *type = GFX_FW_TYPE_CAP;
2613 break;
2614 case AMDGPU_UCODE_ID_SDMA0:
2615 *type = GFX_FW_TYPE_SDMA0;
2616 break;
2617 case AMDGPU_UCODE_ID_SDMA1:
2618 *type = GFX_FW_TYPE_SDMA1;
2619 break;
2620 case AMDGPU_UCODE_ID_SDMA2:
2621 *type = GFX_FW_TYPE_SDMA2;
2622 break;
2623 case AMDGPU_UCODE_ID_SDMA3:
2624 *type = GFX_FW_TYPE_SDMA3;
2625 break;
2626 case AMDGPU_UCODE_ID_SDMA4:
2627 *type = GFX_FW_TYPE_SDMA4;
2628 break;
2629 case AMDGPU_UCODE_ID_SDMA5:
2630 *type = GFX_FW_TYPE_SDMA5;
2631 break;
2632 case AMDGPU_UCODE_ID_SDMA6:
2633 *type = GFX_FW_TYPE_SDMA6;
2634 break;
2635 case AMDGPU_UCODE_ID_SDMA7:
2636 *type = GFX_FW_TYPE_SDMA7;
2637 break;
2638 case AMDGPU_UCODE_ID_CP_MES:
2639 *type = GFX_FW_TYPE_CP_MES;
2640 break;
2641 case AMDGPU_UCODE_ID_CP_MES_DATA:
2642 *type = GFX_FW_TYPE_MES_STACK;
2643 break;
2644 case AMDGPU_UCODE_ID_CP_MES1:
2645 *type = GFX_FW_TYPE_CP_MES_KIQ;
2646 break;
2647 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2648 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2649 break;
2650 case AMDGPU_UCODE_ID_CP_CE:
2651 *type = GFX_FW_TYPE_CP_CE;
2652 break;
2653 case AMDGPU_UCODE_ID_CP_PFP:
2654 *type = GFX_FW_TYPE_CP_PFP;
2655 break;
2656 case AMDGPU_UCODE_ID_CP_ME:
2657 *type = GFX_FW_TYPE_CP_ME;
2658 break;
2659 case AMDGPU_UCODE_ID_CP_MEC1:
2660 *type = GFX_FW_TYPE_CP_MEC;
2661 break;
2662 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2663 *type = GFX_FW_TYPE_CP_MEC_ME1;
2664 break;
2665 case AMDGPU_UCODE_ID_CP_MEC2:
2666 *type = GFX_FW_TYPE_CP_MEC;
2667 break;
2668 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2669 *type = GFX_FW_TYPE_CP_MEC_ME2;
2670 break;
2671 case AMDGPU_UCODE_ID_RLC_P:
2672 *type = GFX_FW_TYPE_RLC_P;
2673 break;
2674 case AMDGPU_UCODE_ID_RLC_V:
2675 *type = GFX_FW_TYPE_RLC_V;
2676 break;
2677 case AMDGPU_UCODE_ID_RLC_G:
2678 *type = GFX_FW_TYPE_RLC_G;
2679 break;
2680 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2681 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2682 break;
2683 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2684 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2685 break;
2686 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2687 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2688 break;
2689 case AMDGPU_UCODE_ID_RLC_IRAM:
2690 *type = GFX_FW_TYPE_RLC_IRAM;
2691 break;
2692 case AMDGPU_UCODE_ID_RLC_DRAM:
2693 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2694 break;
2695 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2696 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2697 break;
2698 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2699 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2700 break;
2701 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2702 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2703 break;
2704 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2705 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2706 break;
2707 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2708 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2709 break;
2710 case AMDGPU_UCODE_ID_SMC:
2711 *type = GFX_FW_TYPE_SMU;
2712 break;
2713 case AMDGPU_UCODE_ID_PPTABLE:
2714 *type = GFX_FW_TYPE_PPTABLE;
2715 break;
2716 case AMDGPU_UCODE_ID_UVD:
2717 *type = GFX_FW_TYPE_UVD;
2718 break;
2719 case AMDGPU_UCODE_ID_UVD1:
2720 *type = GFX_FW_TYPE_UVD1;
2721 break;
2722 case AMDGPU_UCODE_ID_VCE:
2723 *type = GFX_FW_TYPE_VCE;
2724 break;
2725 case AMDGPU_UCODE_ID_VCN:
2726 *type = GFX_FW_TYPE_VCN;
2727 break;
2728 case AMDGPU_UCODE_ID_VCN1:
2729 *type = GFX_FW_TYPE_VCN1;
2730 break;
2731 case AMDGPU_UCODE_ID_DMCU_ERAM:
2732 *type = GFX_FW_TYPE_DMCU_ERAM;
2733 break;
2734 case AMDGPU_UCODE_ID_DMCU_INTV:
2735 *type = GFX_FW_TYPE_DMCU_ISR;
2736 break;
2737 case AMDGPU_UCODE_ID_VCN0_RAM:
2738 *type = GFX_FW_TYPE_VCN0_RAM;
2739 break;
2740 case AMDGPU_UCODE_ID_VCN1_RAM:
2741 *type = GFX_FW_TYPE_VCN1_RAM;
2742 break;
2743 case AMDGPU_UCODE_ID_DMCUB:
2744 *type = GFX_FW_TYPE_DMUB;
2745 break;
2746 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2747 case AMDGPU_UCODE_ID_SDMA_RS64:
2748 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2749 break;
2750 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2751 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2752 break;
2753 case AMDGPU_UCODE_ID_IMU_I:
2754 *type = GFX_FW_TYPE_IMU_I;
2755 break;
2756 case AMDGPU_UCODE_ID_IMU_D:
2757 *type = GFX_FW_TYPE_IMU_D;
2758 break;
2759 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2760 *type = GFX_FW_TYPE_RS64_PFP;
2761 break;
2762 case AMDGPU_UCODE_ID_CP_RS64_ME:
2763 *type = GFX_FW_TYPE_RS64_ME;
2764 break;
2765 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2766 *type = GFX_FW_TYPE_RS64_MEC;
2767 break;
2768 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2769 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2770 break;
2771 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2772 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2773 break;
2774 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2775 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2776 break;
2777 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2778 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2779 break;
2780 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2781 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2782 break;
2783 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2784 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2785 break;
2786 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2787 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2788 break;
2789 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2790 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2791 break;
2792 case AMDGPU_UCODE_ID_VPE_CTX:
2793 *type = GFX_FW_TYPE_VPEC_FW1;
2794 break;
2795 case AMDGPU_UCODE_ID_VPE_CTL:
2796 *type = GFX_FW_TYPE_VPEC_FW2;
2797 break;
2798 case AMDGPU_UCODE_ID_VPE:
2799 *type = GFX_FW_TYPE_VPE;
2800 break;
2801 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2802 *type = GFX_FW_TYPE_UMSCH_UCODE;
2803 break;
2804 case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2805 *type = GFX_FW_TYPE_UMSCH_DATA;
2806 break;
2807 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2808 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2809 break;
2810 case AMDGPU_UCODE_ID_P2S_TABLE:
2811 *type = GFX_FW_TYPE_P2S_TABLE;
2812 break;
2813 case AMDGPU_UCODE_ID_JPEG_RAM:
2814 *type = GFX_FW_TYPE_JPEG_RAM;
2815 break;
2816 case AMDGPU_UCODE_ID_ISP:
2817 *type = GFX_FW_TYPE_ISP;
2818 break;
2819 case AMDGPU_UCODE_ID_MAXIMUM:
2820 default:
2821 return -EINVAL;
2822 }
2823
2824 return 0;
2825 }
2826
2827 static void psp_print_fw_hdr(struct psp_context *psp,
2828 struct amdgpu_firmware_info *ucode)
2829 {
2830 struct amdgpu_device *adev = psp->adev;
2831 struct common_firmware_header *hdr;
2832
2833 switch (ucode->ucode_id) {
2834 case AMDGPU_UCODE_ID_SDMA0:
2835 case AMDGPU_UCODE_ID_SDMA1:
2836 case AMDGPU_UCODE_ID_SDMA2:
2837 case AMDGPU_UCODE_ID_SDMA3:
2838 case AMDGPU_UCODE_ID_SDMA4:
2839 case AMDGPU_UCODE_ID_SDMA5:
2840 case AMDGPU_UCODE_ID_SDMA6:
2841 case AMDGPU_UCODE_ID_SDMA7:
2842 hdr = (struct common_firmware_header *)
2843 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2844 amdgpu_ucode_print_sdma_hdr(hdr);
2845 break;
2846 case AMDGPU_UCODE_ID_CP_CE:
2847 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2848 amdgpu_ucode_print_gfx_hdr(hdr);
2849 break;
2850 case AMDGPU_UCODE_ID_CP_PFP:
2851 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2852 amdgpu_ucode_print_gfx_hdr(hdr);
2853 break;
2854 case AMDGPU_UCODE_ID_CP_ME:
2855 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2856 amdgpu_ucode_print_gfx_hdr(hdr);
2857 break;
2858 case AMDGPU_UCODE_ID_CP_MEC1:
2859 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2860 amdgpu_ucode_print_gfx_hdr(hdr);
2861 break;
2862 case AMDGPU_UCODE_ID_RLC_G:
2863 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2864 amdgpu_ucode_print_rlc_hdr(hdr);
2865 break;
2866 case AMDGPU_UCODE_ID_SMC:
2867 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2868 amdgpu_ucode_print_smc_hdr(hdr);
2869 break;
2870 default:
2871 break;
2872 }
2873 }
2874
2875 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2876 struct amdgpu_firmware_info *ucode,
2877 struct psp_gfx_cmd_resp *cmd)
2878 {
2879 int ret;
2880 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2881
2882 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2883 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2884 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2885 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2886
2887 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2888 if (ret)
2889 dev_err(psp->adev->dev, "Unknown firmware type\n");
2890
2891 return ret;
2892 }
2893
2894 int psp_execute_ip_fw_load(struct psp_context *psp,
2895 struct amdgpu_firmware_info *ucode)
2896 {
2897 int ret = 0;
2898 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2899
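/* Build a LOAD_IP_FW command in the shared PSP command buffer and submit
 * it; the command buffer is released whether or not submission succeeds.
 */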
2900 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2901 if (!ret) {
2902 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2903 psp->fence_buf_mc_addr);
2904 }
2905
2906 release_psp_cmd_buf(psp);
2907
2908 return ret;
2909 }
2910
2911 static int psp_load_p2s_table(struct psp_context *psp)
2912 {
2913 int ret;
2914 struct amdgpu_device *adev = psp->adev;
2915 struct amdgpu_firmware_info *ucode =
2916 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2917
2918 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2919 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2920 return 0;
2921
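/* P2S table loading requires a minimum SOS firmware version on
 * MP0 13.0.6/13.0.14; skip it on older SOS.
 */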
2922 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2923 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2924 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2925 0x0036003C;
2926 if (psp->sos.fw_version < supp_vers)
2927 return 0;
2928 }
2929
2930 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2931 return 0;
2932
2933 ret = psp_execute_ip_fw_load(psp, ucode);
2934
2935 return ret;
2936 }
2937
2938 static int psp_load_smu_fw(struct psp_context *psp)
2939 {
2940 int ret;
2941 struct amdgpu_device *adev = psp->adev;
2942 struct amdgpu_firmware_info *ucode =
2943 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2944 struct amdgpu_ras *ras = psp->ras_context.ras;
2945
2946 /*
2947 * Skip SMU FW reloading when BACO is used for runtime PM only,
2948 * as the SMU is always alive.
2949 */
2950 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2951 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2952 return 0;
2953
2954 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2955 return 0;
2956
2957 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2958 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2959 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2960 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2961 if (ret)
2962 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2963 }
2964
2965 ret = psp_execute_ip_fw_load(psp, ucode);
2966
2967 if (ret)
2968 dev_err(adev->dev, "PSP load smu failed!\n");
2969
2970 return ret;
2971 }
2972
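/* Decide whether PSP should skip loading a given firmware image, e.g. when
 * the image is absent, handled elsewhere (P2S table; SMU with a reload
 * quirk, autoload, or centralized cstate management), filtered under
 * SR-IOV, or covered by RLC autoload (MEC JT).
 */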
2973 static bool fw_load_skip_check(struct psp_context *psp,
2974 struct amdgpu_firmware_info *ucode)
2975 {
2976 if (!ucode->fw || !ucode->ucode_size)
2977 return true;
2978
2979 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2980 return true;
2981
2982 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2983 (psp_smu_reload_quirk(psp) ||
2984 psp->autoload_supported ||
2985 psp->pmfw_centralized_cstate_management))
2986 return true;
2987
2988 if (amdgpu_sriov_vf(psp->adev) &&
2989 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2990 return true;
2991
2992 if (psp->autoload_supported &&
2993 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2994 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2995 /* skip mec JT when autoload is enabled */
2996 return true;
2997
2998 return false;
2999 }
3000
3001 int psp_load_fw_list(struct psp_context *psp,
3002 struct amdgpu_firmware_info **ucode_list, int ucode_count)
3003 {
3004 int ret = 0, i;
3005 struct amdgpu_firmware_info *ucode;
3006
3007 for (i = 0; i < ucode_count; ++i) {
3008 ucode = ucode_list[i];
3009 psp_print_fw_hdr(psp, ucode);
3010 ret = psp_execute_ip_fw_load(psp, ucode);
3011 if (ret)
3012 return ret;
3013 }
3014 return ret;
3015 }
3016
3017 static int psp_load_non_psp_fw(struct psp_context *psp)
3018 {
3019 int i, ret;
3020 struct amdgpu_firmware_info *ucode;
3021 struct amdgpu_device *adev = psp->adev;
3022
3023 if (psp->autoload_supported &&
3024 !psp->pmfw_centralized_cstate_management) {
3025 ret = psp_load_smu_fw(psp);
3026 if (ret)
3027 return ret;
3028 }
3029
3030 /* Load P2S table first if it's available */
3031 psp_load_p2s_table(psp);
3032
3033 for (i = 0; i < adev->firmware.max_ucodes; i++) {
3034 ucode = &adev->firmware.ucode[i];
3035
3036 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3037 !fw_load_skip_check(psp, ucode)) {
3038 ret = psp_load_smu_fw(psp);
3039 if (ret)
3040 return ret;
3041 continue;
3042 }
3043
3044 if (fw_load_skip_check(psp, ucode))
3045 continue;
3046
3047 if (psp->autoload_supported &&
3048 (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3049 IP_VERSION(11, 0, 7) ||
3050 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3051 IP_VERSION(11, 0, 11) ||
3052 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3053 IP_VERSION(11, 0, 12)) &&
3054 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
3055 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
3056 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
3057 /* PSP only receives one SDMA fw for sienna_cichlid,
3058 * as all four sdma fw images are the same
3059 */
3060 continue;
3061
3062 psp_print_fw_hdr(psp, ucode);
3063
3064 ret = psp_execute_ip_fw_load(psp, ucode);
3065 if (ret)
3066 return ret;
3067
3068 /* Start rlc autoload after psp received all the gfx firmware */
3069 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
3070 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
3071 ret = psp_rlc_autoload_start(psp);
3072 if (ret) {
3073 dev_err(adev->dev, "Failed to start rlc autoload\n");
3074 return ret;
3075 }
3076 }
3077 }
3078
3079 return 0;
3080 }
3081
3082 static int psp_load_fw(struct amdgpu_device *adev)
3083 {
3084 int ret;
3085 struct psp_context *psp = &adev->psp;
3086
3087 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3088 /* should not destroy ring, only stop */
3089 psp_ring_stop(psp, PSP_RING_TYPE__KM);
3090 } else {
3091 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
3092
3093 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
3094 if (ret) {
3095 dev_err(adev->dev, "PSP ring init failed!\n");
3096 goto failed;
3097 }
3098 }
3099
3100 ret = psp_hw_start(psp);
3101 if (ret)
3102 goto failed;
3103
3104 ret = psp_load_non_psp_fw(psp);
3105 if (ret)
3106 goto failed1;
3107
3108 ret = psp_asd_initialize(psp);
3109 if (ret) {
3110 dev_err(adev->dev, "PSP load asd failed!\n");
3111 goto failed1;
3112 }
3113
3114 ret = psp_rl_load(adev);
3115 if (ret) {
3116 dev_err(adev->dev, "PSP load RL failed!\n");
3117 goto failed1;
3118 }
3119
3120 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3121 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3122 ret = psp_xgmi_initialize(psp, false, true);
3123 /* Warn on XGMI session initialization failure
3124 * instead of stopping driver initialization
3125 */
3126 if (ret)
3127 dev_err(psp->adev->dev,
3128 "XGMI: Failed to initialize XGMI session\n");
3129 }
3130 }
3131
3132 if (psp->ta_fw) {
3133 ret = psp_ras_initialize(psp);
3134 if (ret)
3135 dev_err(psp->adev->dev,
3136 "RAS: Failed to initialize RAS\n");
3137
3138 ret = psp_hdcp_initialize(psp);
3139 if (ret)
3140 dev_err(psp->adev->dev,
3141 "HDCP: Failed to initialize HDCP\n");
3142
3143 ret = psp_dtm_initialize(psp);
3144 if (ret)
3145 dev_err(psp->adev->dev,
3146 "DTM: Failed to initialize DTM\n");
3147
3148 ret = psp_rap_initialize(psp);
3149 if (ret)
3150 dev_err(psp->adev->dev,
3151 "RAP: Failed to initialize RAP\n");
3152
3153 ret = psp_securedisplay_initialize(psp);
3154 if (ret)
3155 dev_err(psp->adev->dev,
3156 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3157 }
3158
3159 return 0;
3160
3161 failed1:
3162 psp_free_shared_bufs(psp);
3163 failed:
3164 /*
3165 * all cleanup jobs (xgmi terminate, ras terminate,
3166 * ring destroy, cmd/fence/fw buffers destroy,
3167 * psp->cmd destroy) are delayed to psp_hw_fini
3168 */
3169 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3170 return ret;
3171 }
3172
3173 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3174 {
3175 int ret;
3176 struct amdgpu_device *adev = ip_block->adev;
3177
3178 mutex_lock(&adev->firmware.mutex);
3179
3180 ret = amdgpu_ucode_init_bo(adev);
3181 if (ret)
3182 goto failed;
3183
3184 ret = psp_load_fw(adev);
3185 if (ret) {
3186 dev_err(adev->dev, "PSP firmware loading failed\n");
3187 goto failed;
3188 }
3189
3190 mutex_unlock(&adev->firmware.mutex);
3191 return 0;
3192
3193 failed:
3194 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3195 mutex_unlock(&adev->firmware.mutex);
3196 return -EINVAL;
3197 }
3198
3199 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3200 {
3201 struct amdgpu_device *adev = ip_block->adev;
3202 struct psp_context *psp = &adev->psp;
3203
3204 if (psp->ta_fw) {
3205 psp_ras_terminate(psp);
3206 psp_securedisplay_terminate(psp);
3207 psp_rap_terminate(psp);
3208 psp_dtm_terminate(psp);
3209 psp_hdcp_terminate(psp);
3210
3211 if (adev->gmc.xgmi.num_physical_nodes > 1)
3212 psp_xgmi_terminate(psp);
3213 }
3214
3215 psp_asd_terminate(psp);
3216 psp_tmr_terminate(psp);
3217
3218 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3219
3220 return 0;
3221 }
3222
3223 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3224 {
3225 int ret = 0;
3226 struct amdgpu_device *adev = ip_block->adev;
3227 struct psp_context *psp = &adev->psp;
3228
3229 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3230 psp->xgmi_context.context.initialized) {
3231 ret = psp_xgmi_terminate(psp);
3232 if (ret) {
3233 dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3234 goto out;
3235 }
3236 }
3237
3238 if (psp->ta_fw) {
3239 ret = psp_ras_terminate(psp);
3240 if (ret) {
3241 dev_err(adev->dev, "Failed to terminate ras ta\n");
3242 goto out;
3243 }
3244 ret = psp_hdcp_terminate(psp);
3245 if (ret) {
3246 dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3247 goto out;
3248 }
3249 ret = psp_dtm_terminate(psp);
3250 if (ret) {
3251 dev_err(adev->dev, "Failed to terminate dtm ta\n");
3252 goto out;
3253 }
3254 ret = psp_rap_terminate(psp);
3255 if (ret) {
3256 dev_err(adev->dev, "Failed to terminate rap ta\n");
3257 goto out;
3258 }
3259 ret = psp_securedisplay_terminate(psp);
3260 if (ret) {
3261 dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3262 goto out;
3263 }
3264 }
3265
3266 ret = psp_asd_terminate(psp);
3267 if (ret) {
3268 dev_err(adev->dev, "Failed to terminate asd\n");
3269 goto out;
3270 }
3271
3272 ret = psp_tmr_terminate(psp);
3273 if (ret) {
3274 dev_err(adev->dev, "Failed to terminate tmr\n");
3275 goto out;
3276 }
3277
3278 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3279 if (ret)
3280 dev_err(adev->dev, "PSP ring stop failed\n");
3281
3282 out:
3283 return ret;
3284 }
3285
3286 static int psp_resume(struct amdgpu_ip_block *ip_block)
3287 {
3288 int ret;
3289 struct amdgpu_device *adev = ip_block->adev;
3290 struct psp_context *psp = &adev->psp;
3291
3292 dev_info(adev->dev, "PSP is resuming...\n");
3293
3294 if (psp->mem_train_ctx.enable_mem_training) {
3295 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3296 if (ret) {
3297 dev_err(adev->dev, "Failed to process memory training!\n");
3298 return ret;
3299 }
3300 }
3301
3302 mutex_lock(&adev->firmware.mutex);
3303
3304 ret = amdgpu_ucode_init_bo(adev);
3305 if (ret)
3306 goto failed;
3307
3308 ret = psp_hw_start(psp);
3309 if (ret)
3310 goto failed;
3311
3312 ret = psp_load_non_psp_fw(psp);
3313 if (ret)
3314 goto failed;
3315
3316 ret = psp_asd_initialize(psp);
3317 if (ret) {
3318 dev_err(adev->dev, "PSP load asd failed!\n");
3319 goto failed;
3320 }
3321
3322 ret = psp_rl_load(adev);
3323 if (ret) {
3324 dev_err(adev->dev, "PSP load RL failed!\n");
3325 goto failed;
3326 }
3327
3328 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3329 ret = psp_xgmi_initialize(psp, false, true);
3330 /* Warn on XGMI session initialization failure
3331 * instead of stopping driver initialization
3332 */
3333 if (ret)
3334 dev_err(psp->adev->dev,
3335 "XGMI: Failed to initialize XGMI session\n");
3336 }
3337
3338 if (psp->ta_fw) {
3339 ret = psp_ras_initialize(psp);
3340 if (ret)
3341 dev_err(psp->adev->dev,
3342 "RAS: Failed to initialize RAS\n");
3343
3344 ret = psp_hdcp_initialize(psp);
3345 if (ret)
3346 dev_err(psp->adev->dev,
3347 "HDCP: Failed to initialize HDCP\n");
3348
3349 ret = psp_dtm_initialize(psp);
3350 if (ret)
3351 dev_err(psp->adev->dev,
3352 "DTM: Failed to initialize DTM\n");
3353
3354 ret = psp_rap_initialize(psp);
3355 if (ret)
3356 dev_err(psp->adev->dev,
3357 "RAP: Failed to initialize RAP\n");
3358
3359 ret = psp_securedisplay_initialize(psp);
3360 if (ret)
3361 dev_err(psp->adev->dev,
3362 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3363 }
3364
3365 mutex_unlock(&adev->firmware.mutex);
3366
3367 return 0;
3368
3369 failed:
3370 dev_err(adev->dev, "PSP resume failed\n");
3371 mutex_unlock(&adev->firmware.mutex);
3372 return ret;
3373 }
3374
3375 int psp_gpu_reset(struct amdgpu_device *adev)
3376 {
3377 int ret;
3378
3379 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3380 return 0;
3381
3382 mutex_lock(&adev->psp.mutex);
3383 ret = psp_mode1_reset(&adev->psp);
3384 mutex_unlock(&adev->psp.mutex);
3385
3386 return ret;
3387 }
3388
3389 int psp_rlc_autoload_start(struct psp_context *psp)
3390 {
3391 int ret;
3392 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3393
3394 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3395
3396 ret = psp_cmd_submit_buf(psp, NULL, cmd,
3397 psp->fence_buf_mc_addr);
3398
3399 release_psp_cmd_buf(psp);
3400
3401 return ret;
3402 }
3403
3404 int psp_ring_cmd_submit(struct psp_context *psp,
3405 uint64_t cmd_buf_mc_addr,
3406 uint64_t fence_mc_addr,
3407 int index)
3408 {
3409 unsigned int psp_write_ptr_reg = 0;
3410 struct psp_gfx_rb_frame *write_frame;
3411 struct psp_ring *ring = &psp->km_ring;
3412 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3413 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3414 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3415 struct amdgpu_device *adev = psp->adev;
3416 uint32_t ring_size_dw = ring->ring_size / 4;
3417 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3418
3419 /* KM (GPCOM) prepare write pointer */
3420 psp_write_ptr_reg = psp_ring_get_wptr(psp);
3421
3422 /* Update KM RB frame pointer to new frame */
3423 /* write_frame ptr increments by size of rb_frame in bytes */
3424 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
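/* A write pointer that is a multiple of the ring size (in DWORDs) means
 * the ring has wrapped, so start again at the first frame.
 */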
3425 if ((psp_write_ptr_reg % ring_size_dw) == 0)
3426 write_frame = ring_buffer_start;
3427 else
3428 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3429 /* Check invalid write_frame ptr address */
3430 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3431 dev_err(adev->dev,
3432 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3433 ring_buffer_start, ring_buffer_end, write_frame);
3434 dev_err(adev->dev,
3435 "write_frame is pointing to address out of bounds\n");
3436 return -EINVAL;
3437 }
3438
3439 /* Initialize KM RB frame */
3440 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3441
3442 /* Update KM RB frame */
3443 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3444 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3445 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3446 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3447 write_frame->fence_value = index;
3448 amdgpu_device_flush_hdp(adev, NULL);
3449
3450 /* Update the write Pointer in DWORDs */
3451 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3452 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3453 return 0;
3454 }
3455
3456 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3457 {
3458 struct amdgpu_device *adev = psp->adev;
3459 const struct psp_firmware_header_v1_0 *asd_hdr;
3460 int err = 0;
3461
3462 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3463 "amdgpu/%s_asd.bin", chip_name);
3464 if (err)
3465 goto out;
3466
3467 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3468 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3469 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3470 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3471 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3472 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3473 return 0;
3474 out:
3475 amdgpu_ucode_release(&adev->psp.asd_fw);
3476 return err;
3477 }
3478
3479 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3480 {
3481 struct amdgpu_device *adev = psp->adev;
3482 const struct psp_firmware_header_v1_0 *toc_hdr;
3483 int err = 0;
3484
3485 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3486 "amdgpu/%s_toc.bin", chip_name);
3487 if (err)
3488 goto out;
3489
3490 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3491 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3492 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3493 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3494 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3495 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3496 return 0;
3497 out:
3498 amdgpu_ucode_release(&adev->psp.toc_fw);
3499 return err;
3500 }
3501
3502 static int parse_sos_bin_descriptor(struct psp_context *psp,
3503 const struct psp_fw_bin_desc *desc,
3504 const struct psp_firmware_header_v2_0 *sos_hdr)
3505 {
3506 uint8_t *ucode_start_addr = NULL;
3507
3508 if (!psp || !desc || !sos_hdr)
3509 return -EINVAL;
3510
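/* Each descriptor carries an offset relative to the ucode array inside
 * the SOS image; convert it to an absolute pointer before the per-type
 * assignments below.
 */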
3511 ucode_start_addr = (uint8_t *)sos_hdr +
3512 le32_to_cpu(desc->offset_bytes) +
3513 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3514
3515 switch (desc->fw_type) {
3516 case PSP_FW_TYPE_PSP_SOS:
3517 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3518 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3519 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3520 psp->sos.start_addr = ucode_start_addr;
3521 break;
3522 case PSP_FW_TYPE_PSP_SYS_DRV:
3523 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3524 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3525 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3526 psp->sys.start_addr = ucode_start_addr;
3527 break;
3528 case PSP_FW_TYPE_PSP_KDB:
3529 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3530 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3531 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3532 psp->kdb.start_addr = ucode_start_addr;
3533 break;
3534 case PSP_FW_TYPE_PSP_TOC:
3535 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3536 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3537 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3538 psp->toc.start_addr = ucode_start_addr;
3539 break;
3540 case PSP_FW_TYPE_PSP_SPL:
3541 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3542 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3543 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3544 psp->spl.start_addr = ucode_start_addr;
3545 break;
3546 case PSP_FW_TYPE_PSP_RL:
3547 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3548 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3549 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3550 psp->rl.start_addr = ucode_start_addr;
3551 break;
3552 case PSP_FW_TYPE_PSP_SOC_DRV:
3553 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3554 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3555 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3556 psp->soc_drv.start_addr = ucode_start_addr;
3557 break;
3558 case PSP_FW_TYPE_PSP_INTF_DRV:
3559 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3560 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3561 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3562 psp->intf_drv.start_addr = ucode_start_addr;
3563 break;
3564 case PSP_FW_TYPE_PSP_DBG_DRV:
3565 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3566 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3567 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3568 psp->dbg_drv.start_addr = ucode_start_addr;
3569 break;
3570 case PSP_FW_TYPE_PSP_RAS_DRV:
3571 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3572 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3573 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3574 psp->ras_drv.start_addr = ucode_start_addr;
3575 break;
3576 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3577 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
3578 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
3579 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3580 psp->ipkeymgr_drv.start_addr = ucode_start_addr;
3581 break;
3582 case PSP_FW_TYPE_PSP_SPDM_DRV:
3583 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version);
3584 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version);
3585 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3586 psp->spdm_drv.start_addr = ucode_start_addr;
3587 break;
3588 default:
3589 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3590 break;
3591 }
3592
3593 return 0;
3594 }
3595
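/*
 * Fill in the SYS_DRV and SOS descriptors from a v1.x header. MP0 13.0.2
 * parts that are not connected to the CPU load the alternate (_aux)
 * images; everything else uses the primary ones.
 */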
3596 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3597 {
3598 const struct psp_firmware_header_v1_0 *sos_hdr;
3599 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3600 uint8_t *ucode_array_start_addr;
3601
3602 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3603 ucode_array_start_addr = (uint8_t *)sos_hdr +
3604 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3605
3606 if (adev->gmc.xgmi.connected_to_cpu ||
3607 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3608 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3609 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3610
3611 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3612 adev->psp.sys.start_addr = ucode_array_start_addr;
3613
3614 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3615 adev->psp.sos.start_addr = ucode_array_start_addr +
3616 le32_to_cpu(sos_hdr->sos.offset_bytes);
3617 } else {
3618 /* Load alternate PSP SOS FW */
3619 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3620
3621 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3622 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3623
3624 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3625 adev->psp.sys.start_addr = ucode_array_start_addr +
3626 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3627
3628 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3629 adev->psp.sos.start_addr = ucode_array_start_addr +
3630 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3631 }
3632
3633 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3634 dev_warn(adev->dev, "PSP SOS FW not available");
3635 return -EINVAL;
3636 }
3637
3638 return 0;
3639 }
3640
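/*
 * Request amdgpu/<chip>_sos.bin (or _sos_kicker.bin) and parse it by header
 * version: v1.x headers carry fixed toc/kdb/spl/rl fields, while v2.x
 * headers carry an array of psp_fw_bin descriptors.
 */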
3641 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3642 {
3643 struct amdgpu_device *adev = psp->adev;
3644 const struct psp_firmware_header_v1_0 *sos_hdr;
3645 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3646 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3647 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3648 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3649 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3650 int fw_index, fw_bin_count, start_index = 0;
3651 const struct psp_fw_bin_desc *fw_bin;
3652 uint8_t *ucode_array_start_addr;
3653 int err = 0;
3654
3655 if (amdgpu_is_kicker_fw(adev))
3656 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3657 "amdgpu/%s_sos_kicker.bin", chip_name);
3658 else
3659 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3660 "amdgpu/%s_sos.bin", chip_name);
3661 if (err)
3662 goto out;
3663
3664 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3665 ucode_array_start_addr = (uint8_t *)sos_hdr +
3666 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3667 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3668
3669 switch (sos_hdr->header.header_version_major) {
3670 case 1:
3671 err = psp_init_sos_base_fw(adev);
3672 if (err)
3673 goto out;
3674
3675 if (sos_hdr->header.header_version_minor == 1) {
3676 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3677 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3678 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3679 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3680 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3681 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3682 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3683 }
3684 if (sos_hdr->header.header_version_minor == 2) {
3685 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3686 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3687 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3688 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3689 }
3690 if (sos_hdr->header.header_version_minor == 3) {
3691 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3692 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3693 adev->psp.toc.start_addr = ucode_array_start_addr +
3694 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3695 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3696 adev->psp.kdb.start_addr = ucode_array_start_addr +
3697 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3698 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3699 adev->psp.spl.start_addr = ucode_array_start_addr +
3700 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3701 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3702 adev->psp.rl.start_addr = ucode_array_start_addr +
3703 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3704 }
3705 break;
3706 case 2:
3707 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3708
3709 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3710
3711 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3712 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3713 err = -EINVAL;
3714 goto out;
3715 }
3716
3717 if (sos_hdr_v2_0->header.header_version_minor == 1) {
3718 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3719
3720 fw_bin = sos_hdr_v2_1->psp_fw_bin;
3721
3722 if (psp_is_aux_sos_load_required(psp))
3723 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3724 else
3725 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3726
3727 } else {
3728 fw_bin = sos_hdr_v2_0->psp_fw_bin;
3729 }
3730
3731 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3732 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3733 sos_hdr_v2_0);
3734 if (err)
3735 goto out;
3736 }
3737 break;
3738 default:
3739 dev_err(adev->dev,
3740 "unsupported psp sos firmware\n");
3741 err = -EINVAL;
3742 goto out;
3743 }
3744
3745 return 0;
3746 out:
3747 amdgpu_ucode_release(&adev->psp.sos_fw);
3748
3749 return err;
3750 }
3751
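/*
 * On MP0 13.0.6 APUs with a new enough TA binary, the XGMI AUX TA replaces
 * the regular XGMI TA; report whether this descriptor should be used.
 */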
3752 static bool is_ta_fw_applicable(struct psp_context *psp,
3753 const struct psp_fw_bin_desc *desc)
3754 {
3755 struct amdgpu_device *adev = psp->adev;
3756 uint32_t fw_version;
3757
3758 switch (desc->fw_type) {
3759 case TA_FW_TYPE_PSP_XGMI:
3760 case TA_FW_TYPE_PSP_XGMI_AUX:
3761 /* For now, the AUX TA only exists in the 13.0.6 TA binary,
3762 * starting from version 20.00.0x.14.
3763 */
3764 if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3765 IP_VERSION(13, 0, 6)) {
3766 fw_version = le32_to_cpu(desc->fw_version);
3767
3768 if (adev->flags & AMD_IS_APU &&
3769 (fw_version & 0xff) >= 0x14)
3770 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3771 else
3772 return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3773 }
3774 break;
3775 default:
3776 break;
3777 }
3778
3779 return true;
3780 }
3781
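/*
 * Copy one packaged TA descriptor into the bin_desc of the corresponding
 * TA context (ASD, XGMI, RAS, HDCP, DTM, RAP or SECUREDISPLAY).
 */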
3782 static int parse_ta_bin_descriptor(struct psp_context *psp,
3783 const struct psp_fw_bin_desc *desc,
3784 const struct ta_firmware_header_v2_0 *ta_hdr)
3785 {
3786 uint8_t *ucode_start_addr = NULL;
3787
3788 if (!psp || !desc || !ta_hdr)
3789 return -EINVAL;
3790
3791 if (!is_ta_fw_applicable(psp, desc))
3792 return 0;
3793
3794 ucode_start_addr = (uint8_t *)ta_hdr +
3795 le32_to_cpu(desc->offset_bytes) +
3796 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3797
3798 switch (desc->fw_type) {
3799 case TA_FW_TYPE_PSP_ASD:
3800 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3801 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3802 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3803 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3804 break;
3805 case TA_FW_TYPE_PSP_XGMI:
3806 case TA_FW_TYPE_PSP_XGMI_AUX:
3807 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3808 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3809 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3810 break;
3811 case TA_FW_TYPE_PSP_RAS:
3812 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3813 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3814 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3815 break;
3816 case TA_FW_TYPE_PSP_HDCP:
3817 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3818 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3819 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3820 break;
3821 case TA_FW_TYPE_PSP_DTM:
3822 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3823 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3824 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3825 break;
3826 case TA_FW_TYPE_PSP_RAP:
3827 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3828 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3829 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3830 break;
3831 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3832 psp->securedisplay_context.context.bin_desc.fw_version =
3833 le32_to_cpu(desc->fw_version);
3834 psp->securedisplay_context.context.bin_desc.size_bytes =
3835 le32_to_cpu(desc->size_bytes);
3836 psp->securedisplay_context.context.bin_desc.start_addr =
3837 ucode_start_addr;
3838 break;
3839 default:
3840 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3841 break;
3842 }
3843
3844 return 0;
3845 }
3846
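/* v1 TA images describe each TA through fixed header fields. */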
3847 static int parse_ta_v1_microcode(struct psp_context *psp)
3848 {
3849 const struct ta_firmware_header_v1_0 *ta_hdr;
3850 struct amdgpu_device *adev = psp->adev;
3851
3852 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3853
3854 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3855 return -EINVAL;
3856
3857 adev->psp.xgmi_context.context.bin_desc.fw_version =
3858 le32_to_cpu(ta_hdr->xgmi.fw_version);
3859 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3860 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3861 adev->psp.xgmi_context.context.bin_desc.start_addr =
3862 (uint8_t *)ta_hdr +
3863 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3864
3865 adev->psp.ras_context.context.bin_desc.fw_version =
3866 le32_to_cpu(ta_hdr->ras.fw_version);
3867 adev->psp.ras_context.context.bin_desc.size_bytes =
3868 le32_to_cpu(ta_hdr->ras.size_bytes);
3869 adev->psp.ras_context.context.bin_desc.start_addr =
3870 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3871 le32_to_cpu(ta_hdr->ras.offset_bytes);
3872
3873 adev->psp.hdcp_context.context.bin_desc.fw_version =
3874 le32_to_cpu(ta_hdr->hdcp.fw_version);
3875 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3876 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3877 adev->psp.hdcp_context.context.bin_desc.start_addr =
3878 (uint8_t *)ta_hdr +
3879 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3880
3881 adev->psp.dtm_context.context.bin_desc.fw_version =
3882 le32_to_cpu(ta_hdr->dtm.fw_version);
3883 adev->psp.dtm_context.context.bin_desc.size_bytes =
3884 le32_to_cpu(ta_hdr->dtm.size_bytes);
3885 adev->psp.dtm_context.context.bin_desc.start_addr =
3886 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3887 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3888
3889 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3890 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3891 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3892 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3893 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3894 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3895 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3896
3897 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3898
3899 return 0;
3900 }
3901
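/* v2 TA images carry a ta_fw_bin descriptor array, one entry per TA. */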
3902 static int parse_ta_v2_microcode(struct psp_context *psp)
3903 {
3904 const struct ta_firmware_header_v2_0 *ta_hdr;
3905 struct amdgpu_device *adev = psp->adev;
3906 int err = 0;
3907 int ta_index = 0;
3908
3909 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3910
3911 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3912 return -EINVAL;
3913
3914 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3915 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3916 return -EINVAL;
3917 }
3918
3919 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3920 err = parse_ta_bin_descriptor(psp,
3921 &ta_hdr->ta_fw_bin[ta_index],
3922 ta_hdr);
3923 if (err)
3924 return err;
3925 }
3926
3927 return 0;
3928 }
3929
3930 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3931 {
3932 const struct common_firmware_header *hdr;
3933 struct amdgpu_device *adev = psp->adev;
3934 int err;
3935
3936 if (amdgpu_is_kicker_fw(adev))
3937 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3938 "amdgpu/%s_ta_kicker.bin", chip_name);
3939 else
3940 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3941 "amdgpu/%s_ta.bin", chip_name);
3942 if (err)
3943 return err;
3944
3945 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3946 switch (le16_to_cpu(hdr->header_version_major)) {
3947 case 1:
3948 err = parse_ta_v1_microcode(psp);
3949 break;
3950 case 2:
3951 err = parse_ta_v2_microcode(psp);
3952 break;
3953 default:
3954 dev_err(adev->dev, "unsupported TA header version\n");
3955 err = -EINVAL;
3956 }
3957
3958 if (err)
3959 amdgpu_ucode_release(&adev->psp.ta_fw);
3960
3961 return err;
3962 }
3963
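/* The CAP (SRIOV capability) microcode is optional and SRIOV-only. */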
3964 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3965 {
3966 struct amdgpu_device *adev = psp->adev;
3967 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3968 struct amdgpu_firmware_info *info = NULL;
3969 int err = 0;
3970
3971 if (!amdgpu_sriov_vf(adev)) {
3972 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3973 return -EINVAL;
3974 }
3975
3976 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
3977 "amdgpu/%s_cap.bin", chip_name);
3978 if (err) {
3979 if (err == -ENODEV) {
3980 dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3981 err = 0;
3982 } else {
3983 dev_err(adev->dev, "fail to initialize cap microcode\n");
3984 }
3985 goto out;
3986 }
3987
3988 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3989 info->ucode_id = AMDGPU_UCODE_ID_CAP;
3990 info->fw = adev->psp.cap_fw;
3991 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3992 adev->psp.cap_fw->data;
3993 adev->firmware.fw_size += ALIGN(
3994 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3995 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3996 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3997 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3998
3999 return 0;
4000
4001 out:
4002 amdgpu_ucode_release(&adev->psp.cap_fw);
4003 return err;
4004 }
4005
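/*
 * Ask the PSP to apply SQ perfmon overrides for one XCP. The request is a
 * no-op under SRIOV and is only accepted by MP0 13.0.6 firmware.
 */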
4006 int psp_config_sq_perfmon(struct psp_context *psp,
4007 uint32_t xcp_id, bool core_override_enable,
4008 bool reg_override_enable, bool perfmon_override_enable)
4009 {
4010 int ret;
4011
4012 if (amdgpu_sriov_vf(psp->adev))
4013 return 0;
4014
4015 if (xcp_id >= MAX_XCP) {
4016 dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
4017 return -EINVAL;
4018 }
4019
4020 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
4021 dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
4022 amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
4023 return -EINVAL;
4024 }
4025 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
4026
4027 cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
4028 cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
4029 cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
4030 cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
4031 cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
4032
4033 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
4034 if (ret)
4035 dev_warn(psp->adev->dev, "PSP failed to configure SQ perfmon: xcp%d core%d reg%d perfmon%d\n",
4036 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
4037
4038 release_psp_cmd_buf(psp);
4039 return ret;
4040 }
4041
4042 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
4043 enum amd_clockgating_state state)
4044 {
4045 return 0;
4046 }
4047
4048 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
4049 enum amd_powergating_state state)
4050 {
4051 return 0;
4052 }
4053
4054 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
4055 struct device_attribute *attr,
4056 char *buf)
4057 {
4058 struct drm_device *ddev = dev_get_drvdata(dev);
4059 struct amdgpu_device *adev = drm_to_adev(ddev);
4060 struct amdgpu_ip_block *ip_block;
4061 uint32_t fw_ver;
4062 int ret;
4063
4064 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4065 if (!ip_block || !ip_block->status.late_initialized) {
4066 dev_info(adev->dev, "PSP block is not ready yet.\n");
4067 return -EBUSY;
4068 }
4069
4070 mutex_lock(&adev->psp.mutex);
4071 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
4072 mutex_unlock(&adev->psp.mutex);
4073
4074 if (ret) {
4075 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
4076 return ret;
4077 }
4078
4079 return sysfs_emit(buf, "%x\n", fw_ver);
4080 }
4081
4082 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
4083 struct device_attribute *attr,
4084 const char *buf,
4085 size_t count)
4086 {
4087 struct drm_device *ddev = dev_get_drvdata(dev);
4088 struct amdgpu_device *adev = drm_to_adev(ddev);
4089 int ret, idx;
4090 const struct firmware *usbc_pd_fw;
4091 struct amdgpu_bo *fw_buf_bo = NULL;
4092 uint64_t fw_pri_mc_addr;
4093 void *fw_pri_cpu_addr;
4094 struct amdgpu_ip_block *ip_block;
4095
4096 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4097 if (!ip_block || !ip_block->status.late_initialized) {
4098 dev_err(adev->dev, "PSP block is not ready yet.\n");
4099 return -EBUSY;
4100 }
4101
4102 if (!drm_dev_enter(ddev, &idx))
4103 return -ENODEV;
4104
4105 ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
4106 "amdgpu/%s", buf);
4107 if (ret)
4108 goto fail;
4109
4110 /* The LFB address must be aligned to a 1MB boundary, as required by the PSP */
4111 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
4112 AMDGPU_GEM_DOMAIN_VRAM |
4113 AMDGPU_GEM_DOMAIN_GTT,
4114 &fw_buf_bo, &fw_pri_mc_addr,
4115 &fw_pri_cpu_addr);
4116 if (ret)
4117 goto rel_buf;
4118
4119 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
4120
4121 mutex_lock(&adev->psp.mutex);
4122 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
4123 mutex_unlock(&adev->psp.mutex);
4124
4125 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4126
4127 rel_buf:
4128 amdgpu_ucode_release(&usbc_pd_fw);
4129 fail:
4130 if (ret) {
4131 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
4132 count = ret;
4133 }
4134
4135 drm_dev_exit(idx);
4136 return count;
4137 }
4138
4139 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
4140 {
4141 int idx;
4142
4143 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
4144 return;
4145
4146 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
4147 memcpy(psp->fw_pri_buf, start_addr, bin_size);
4148
4149 drm_dev_exit(idx);
4150 }
4151
4152 /**
4153 * DOC: usbc_pd_fw
4154 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
4155 * this file will trigger the update process.
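*
* A minimal usage sketch (the sysfs path is an example and may differ per
* system; the written name is resolved under the firmware search path,
* e.g. /lib/firmware/amdgpu/):
*   cat /sys/class/drm/card0/device/usbc_pd_fw
*   echo new_pd_fw.bin > /sys/class/drm/card0/device/usbc_pd_fw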
4156 */
4157 static DEVICE_ATTR(usbc_pd_fw, 0644,
4158 psp_usbc_pd_fw_sysfs_read,
4159 psp_usbc_pd_fw_sysfs_write);
4160
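/* A firmware descriptor is considered valid once it has a non-zero size. */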
4161 int is_psp_fw_valid(struct psp_bin_desc bin)
4162 {
4163 return bin.size_bytes;
4164 }
4165
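/* Stage the IFWI image chunk by chunk into a temporary kernel buffer. */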
4166 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4167 const struct bin_attribute *bin_attr,
4168 char *buffer, loff_t pos, size_t count)
4169 {
4170 struct device *dev = kobj_to_dev(kobj);
4171 struct drm_device *ddev = dev_get_drvdata(dev);
4172 struct amdgpu_device *adev = drm_to_adev(ddev);
4173
4174 adev->psp.vbflash_done = false;
4175
4176 /* Safeguard against memory drain */
4177 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
4178 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4179 kvfree(adev->psp.vbflash_tmp_buf);
4180 adev->psp.vbflash_tmp_buf = NULL;
4181 adev->psp.vbflash_image_size = 0;
4182 return -ENOMEM;
4183 }
4184
4185 /* TODO Just allocate max for now and optimize to realloc later if needed */
4186 if (!adev->psp.vbflash_tmp_buf) {
4187 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4188 if (!adev->psp.vbflash_tmp_buf)
4189 return -ENOMEM;
4190 }
4191
4192 mutex_lock(&adev->psp.mutex);
4193 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4194 adev->psp.vbflash_image_size += count;
4195 mutex_unlock(&adev->psp.mutex);
4196
4197 dev_dbg(adev->dev, "IFWI staged for update\n");
4198
4199 return count;
4200 }
4201
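/*
 * Copy the staged IFWI image into a VRAM buffer and ask the PSP to flash
 * it to the SPIROM; the temporary staging buffer is released either way.
 */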
4202 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4203 const struct bin_attribute *bin_attr, char *buffer,
4204 loff_t pos, size_t count)
4205 {
4206 struct device *dev = kobj_to_dev(kobj);
4207 struct drm_device *ddev = dev_get_drvdata(dev);
4208 struct amdgpu_device *adev = drm_to_adev(ddev);
4209 struct amdgpu_bo *fw_buf_bo = NULL;
4210 uint64_t fw_pri_mc_addr;
4211 void *fw_pri_cpu_addr;
4212 int ret;
4213
4214 if (adev->psp.vbflash_image_size == 0)
4215 return -EINVAL;
4216
4217 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4218
4219 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4220 AMDGPU_GPU_PAGE_SIZE,
4221 AMDGPU_GEM_DOMAIN_VRAM,
4222 &fw_buf_bo,
4223 &fw_pri_mc_addr,
4224 &fw_pri_cpu_addr);
4225 if (ret)
4226 goto rel_buf;
4227
4228 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4229
4230 mutex_lock(&adev->psp.mutex);
4231 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4232 mutex_unlock(&adev->psp.mutex);
4233
4234 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4235
4236 rel_buf:
4237 kvfree(adev->psp.vbflash_tmp_buf);
4238 adev->psp.vbflash_tmp_buf = NULL;
4239 adev->psp.vbflash_image_size = 0;
4240
4241 if (ret) {
4242 dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4243 return ret;
4244 }
4245
4246 dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4247 return 0;
4248 }
4249
4250 /**
4251 * DOC: psp_vbflash
4252 * Writing to this file will stage an IFWI for update. Reading from this file
4253 * will trigger the update process.
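*
* A typical flow (the sysfs paths are examples and may differ per system):
*   cat ifwi_image.bin > /sys/class/drm/card0/device/psp_vbflash
*   cat /sys/class/drm/card0/device/psp_vbflash > /dev/null
*   cat /sys/class/drm/card0/device/psp_vbflash_status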
4254 */
4255 static const struct bin_attribute psp_vbflash_bin_attr = {
4256 .attr = {.name = "psp_vbflash", .mode = 0660},
4257 .size = 0,
4258 .write = amdgpu_psp_vbflash_write,
4259 .read = amdgpu_psp_vbflash_read,
4260 };
4261
4262 /**
4263 * DOC: psp_vbflash_status
4264 * The status of the flash process.
4265 * 0: IFWI flash not complete.
4266 * 1: IFWI flash complete.
4267 */
4268 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4269 struct device_attribute *attr,
4270 char *buf)
4271 {
4272 struct drm_device *ddev = dev_get_drvdata(dev);
4273 struct amdgpu_device *adev = drm_to_adev(ddev);
4274 uint32_t vbflash_status;
4275
4276 vbflash_status = psp_vbflash_status(&adev->psp);
4277 if (!adev->psp.vbflash_done)
4278 vbflash_status = 0;
4279 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4280 vbflash_status = 1;
4281
4282 return sysfs_emit(buf, "0x%x\n", vbflash_status);
4283 }
4284 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4285
4286 static const struct bin_attribute *const bin_flash_attrs[] = {
4287 &psp_vbflash_bin_attr,
4288 NULL
4289 };
4290
4291 static struct attribute *flash_attrs[] = {
4292 &dev_attr_psp_vbflash_status.attr,
4293 &dev_attr_usbc_pd_fw.attr,
4294 NULL
4295 };
4296
4297 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4298 {
4299 struct device *dev = kobj_to_dev(kobj);
4300 struct drm_device *ddev = dev_get_drvdata(dev);
4301 struct amdgpu_device *adev = drm_to_adev(ddev);
4302
4303 if (attr == &dev_attr_usbc_pd_fw.attr)
4304 return adev->psp.sup_pd_fw_up ? 0660 : 0;
4305
4306 return adev->psp.sup_ifwi_up ? 0440 : 0;
4307 }
4308
4309 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4310 const struct bin_attribute *attr,
4311 int idx)
4312 {
4313 struct device *dev = kobj_to_dev(kobj);
4314 struct drm_device *ddev = dev_get_drvdata(dev);
4315 struct amdgpu_device *adev = drm_to_adev(ddev);
4316
4317 return adev->psp.sup_ifwi_up ? 0660 : 0;
4318 }
4319
4320 const struct attribute_group amdgpu_flash_attr_group = {
4321 .attrs = flash_attrs,
4322 .bin_attrs = bin_flash_attrs,
4323 .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4324 .is_visible = amdgpu_flash_attr_is_visible,
4325 };
4326
4327 #if defined(CONFIG_DEBUG_FS)
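/*
 * open() allocates a GTT buffer of twice the maximum VBIOS size and asks
 * the PSP to dump the SPIROM into it; read() then serves that buffer to
 * userspace and release() frees it.
 */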
4328 static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
4329 {
4330 struct amdgpu_device *adev = filp->f_inode->i_private;
4331 struct spirom_bo *bo_triplet;
4332 int ret;
4333
4334 /* serialize open() calls on this file */
4335 if (!mutex_trylock(&adev->psp.mutex))
4336 return -EBUSY;
4337
4338 /*
4339 * Make sure only one userspace process is dumping at a time so that
4340 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
4341 * e.g. when one process tries to open the file while another one has
4342 * already proceeded to read or release it. This also removes the need
4343 * to take the mutex in the read() and release() callbacks.
4344 */
4345 if (adev->psp.spirom_dump_trip) {
4346 mutex_unlock(&adev->psp.mutex);
4347 return -EBUSY;
4348 }
4349
4350 bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
4351 if (!bo_triplet) {
4352 mutex_unlock(&adev->psp.mutex);
4353 return -ENOMEM;
4354 }
4355
4356 ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
4357 AMDGPU_GPU_PAGE_SIZE,
4358 AMDGPU_GEM_DOMAIN_GTT,
4359 &bo_triplet->bo,
4360 &bo_triplet->mc_addr,
4361 &bo_triplet->cpu_addr);
4362 if (ret)
4363 goto rel_trip;
4364
4365 ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
4366 if (ret)
4367 goto rel_bo;
4368
4369 adev->psp.spirom_dump_trip = bo_triplet;
4370 mutex_unlock(&adev->psp.mutex);
4371 return 0;
4372 rel_bo:
4373 amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4374 &bo_triplet->cpu_addr);
4375 rel_trip:
4376 kfree(bo_triplet);
4377 mutex_unlock(&adev->psp.mutex);
4378 dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
4379 return ret;
4380 }
4381
4382 static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
4383 loff_t *pos)
4384 {
4385 struct amdgpu_device *adev = filp->f_inode->i_private;
4386 struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4387
4388 if (!bo_triplet)
4389 return -EINVAL;
4390
4391 return simple_read_from_buffer(buf,
4392 size,
4393 pos, bo_triplet->cpu_addr,
4394 AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4395 }
4396
4397 static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
4398 {
4399 struct amdgpu_device *adev = filp->f_inode->i_private;
4400 struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4401
4402 if (bo_triplet) {
4403 amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4404 &bo_triplet->cpu_addr);
4405 kfree(bo_triplet);
4406 }
4407
4408 adev->psp.spirom_dump_trip = NULL;
4409 return 0;
4410 }
4411
4412 static const struct file_operations psp_dump_spirom_debugfs_ops = {
4413 .owner = THIS_MODULE,
4414 .open = psp_read_spirom_debugfs_open,
4415 .read = psp_read_spirom_debugfs_read,
4416 .release = psp_read_spirom_debugfs_release,
4417 .llseek = default_llseek,
4418 };
4419 #endif
4420
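/*
 * Expose a read-only debugfs entry for dumping the SPIROM, e.g. (the path
 * is an example and depends on the DRM minor number):
 *   cat /sys/kernel/debug/dri/0/psp_spirom_dump > spirom.bin
 */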
4421 void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
4422 {
4423 #if defined(CONFIG_DEBUG_FS)
4424 struct drm_minor *minor = adev_to_drm(adev)->primary;
4425
4426 debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
4427 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4428 #endif
4429 }
4430
4431 const struct amd_ip_funcs psp_ip_funcs = {
4432 .name = "psp",
4433 .early_init = psp_early_init,
4434 .sw_init = psp_sw_init,
4435 .sw_fini = psp_sw_fini,
4436 .hw_init = psp_hw_init,
4437 .hw_fini = psp_hw_fini,
4438 .suspend = psp_suspend,
4439 .resume = psp_resume,
4440 .set_clockgating_state = psp_set_clockgating_state,
4441 .set_powergating_state = psp_set_powergating_state,
4442 };
4443
4444 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4445 .type = AMD_IP_BLOCK_TYPE_PSP,
4446 .major = 3,
4447 .minor = 1,
4448 .rev = 0,
4449 .funcs = &psp_ip_funcs,
4450 };
4451
4452 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4453 .type = AMD_IP_BLOCK_TYPE_PSP,
4454 .major = 10,
4455 .minor = 0,
4456 .rev = 0,
4457 .funcs = &psp_ip_funcs,
4458 };
4459
4460 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4461 .type = AMD_IP_BLOCK_TYPE_PSP,
4462 .major = 11,
4463 .minor = 0,
4464 .rev = 0,
4465 .funcs = &psp_ip_funcs,
4466 };
4467
4468 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4469 .type = AMD_IP_BLOCK_TYPE_PSP,
4470 .major = 11,
4471 .minor = 0,
4472 .rev = 8,
4473 .funcs = &psp_ip_funcs,
4474 };
4475
4476 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4477 .type = AMD_IP_BLOCK_TYPE_PSP,
4478 .major = 12,
4479 .minor = 0,
4480 .rev = 0,
4481 .funcs = &psp_ip_funcs,
4482 };
4483
4484 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4485 .type = AMD_IP_BLOCK_TYPE_PSP,
4486 .major = 13,
4487 .minor = 0,
4488 .rev = 0,
4489 .funcs = &psp_ip_funcs,
4490 };
4491
4492 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4493 .type = AMD_IP_BLOCK_TYPE_PSP,
4494 .major = 13,
4495 .minor = 0,
4496 .rev = 4,
4497 .funcs = &psp_ip_funcs,
4498 };
4499
4500 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4501 .type = AMD_IP_BLOCK_TYPE_PSP,
4502 .major = 14,
4503 .minor = 0,
4504 .rev = 0,
4505 .funcs = &psp_ip_funcs,
4506 };
4507