1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46
47 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
48
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52
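/* Create the 4 KiB kernel-mode (km) submission ring buffer for PSP,
 * preferring VRAM with GTT as a fallback domain.
 */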
53 static int psp_ring_init(struct psp_context *psp,
54 enum psp_ring_type ring_type)
55 {
56 int ret = 0;
57 struct psp_ring *ring;
58 struct amdgpu_device *adev = psp->adev;
59
60 ring = &psp->km_ring;
61
62 ring->ring_type = ring_type;
63
64 /* allocate a 4K page of local frame buffer memory for the ring */
65 ring->ring_size = 0x1000;
66 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 AMDGPU_GEM_DOMAIN_VRAM |
68 AMDGPU_GEM_DOMAIN_GTT,
69 &adev->firmware.rbuf,
70 &ring->ring_mem_mc_addr,
71 (void **)&ring->ring_mem);
72 if (ret) {
73 ring->ring_size = 0;
74 return ret;
75 }
76
77 return 0;
78 }
79
80 /*
81 * Because DF Cstate management is centralized in the PMFW, the
82 * firmware loading sequence is updated as below:
83 * - Load KDB
84 * - Load SYS_DRV
85 * - Load tOS
86 * - Load PMFW
87 * - Setup TMR
88 * - Load other non-psp fw
89 * - Load ASD
90 * - Load XGMI/RAS/HDCP/DTM TA if any
91 *
92 * This new sequence is required for
93 * - Arcturus and onwards
94 */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 struct amdgpu_device *adev = psp->adev;
98
99 if (amdgpu_sriov_vf(adev)) {
100 psp->pmfw_centralized_cstate_management = false;
101 return;
102 }
103
104 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 case IP_VERSION(11, 0, 0):
106 case IP_VERSION(11, 0, 4):
107 case IP_VERSION(11, 0, 5):
108 case IP_VERSION(11, 0, 7):
109 case IP_VERSION(11, 0, 9):
110 case IP_VERSION(11, 0, 11):
111 case IP_VERSION(11, 0, 12):
112 case IP_VERSION(11, 0, 13):
113 case IP_VERSION(13, 0, 0):
114 case IP_VERSION(13, 0, 2):
115 case IP_VERSION(13, 0, 7):
116 psp->pmfw_centralized_cstate_management = true;
117 break;
118 default:
119 psp->pmfw_centralized_cstate_management = false;
120 break;
121 }
122 }
123
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 struct amdgpu_device *adev = psp->adev;
127 char ucode_prefix[30];
128 int ret = 0;
129
130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131
132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 case IP_VERSION(9, 0, 0):
134 case IP_VERSION(11, 0, 7):
135 case IP_VERSION(11, 0, 9):
136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 ret = psp_init_cap_microcode(psp, ucode_prefix);
138 break;
139 case IP_VERSION(13, 0, 2):
140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 ret = psp_init_cap_microcode(psp, ucode_prefix);
142 ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 break;
144 case IP_VERSION(13, 0, 0):
145 adev->virt.autoload_ucode_id = 0;
146 break;
147 case IP_VERSION(13, 0, 6):
148 case IP_VERSION(13, 0, 14):
149 ret = psp_init_cap_microcode(psp, ucode_prefix);
150 ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 break;
152 case IP_VERSION(13, 0, 10):
153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 ret = psp_init_cap_microcode(psp, ucode_prefix);
155 break;
156 case IP_VERSION(13, 0, 12):
157 ret = psp_init_ta_microcode(psp, ucode_prefix);
158 break;
159 default:
160 return -EINVAL;
161 }
162 return ret;
163 }
164
165 static int psp_early_init(struct amdgpu_ip_block *ip_block)
166 {
167 struct amdgpu_device *adev = ip_block->adev;
168 struct psp_context *psp = &adev->psp;
169
170 psp->autoload_supported = true;
171 psp->boot_time_tmr = true;
172
173 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
174 case IP_VERSION(9, 0, 0):
175 psp_v3_1_set_psp_funcs(psp);
176 psp->autoload_supported = false;
177 psp->boot_time_tmr = false;
178 break;
179 case IP_VERSION(10, 0, 0):
180 case IP_VERSION(10, 0, 1):
181 psp_v10_0_set_psp_funcs(psp);
182 psp->autoload_supported = false;
183 psp->boot_time_tmr = false;
184 break;
185 case IP_VERSION(11, 0, 2):
186 case IP_VERSION(11, 0, 4):
187 psp_v11_0_set_psp_funcs(psp);
188 psp->autoload_supported = false;
189 psp->boot_time_tmr = false;
190 break;
191 case IP_VERSION(11, 0, 0):
192 case IP_VERSION(11, 0, 7):
193 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
194 fallthrough;
195 case IP_VERSION(11, 0, 5):
196 case IP_VERSION(11, 0, 9):
197 case IP_VERSION(11, 0, 11):
198 case IP_VERSION(11, 5, 0):
199 case IP_VERSION(11, 5, 2):
200 case IP_VERSION(11, 0, 12):
201 case IP_VERSION(11, 0, 13):
202 psp_v11_0_set_psp_funcs(psp);
203 psp->boot_time_tmr = false;
204 break;
205 case IP_VERSION(11, 0, 3):
206 case IP_VERSION(12, 0, 1):
207 psp_v12_0_set_psp_funcs(psp);
208 psp->autoload_supported = false;
209 psp->boot_time_tmr = false;
210 break;
211 case IP_VERSION(13, 0, 2):
212 psp->boot_time_tmr = false;
213 fallthrough;
214 case IP_VERSION(13, 0, 6):
215 case IP_VERSION(13, 0, 14):
216 psp_v13_0_set_psp_funcs(psp);
217 psp->autoload_supported = false;
218 break;
219 case IP_VERSION(13, 0, 12):
220 psp_v13_0_set_psp_funcs(psp);
221 psp->autoload_supported = false;
222 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
223 break;
224 case IP_VERSION(13, 0, 1):
225 case IP_VERSION(13, 0, 3):
226 case IP_VERSION(13, 0, 5):
227 case IP_VERSION(13, 0, 8):
228 case IP_VERSION(13, 0, 11):
229 case IP_VERSION(14, 0, 0):
230 case IP_VERSION(14, 0, 1):
231 case IP_VERSION(14, 0, 4):
232 psp_v13_0_set_psp_funcs(psp);
233 psp->boot_time_tmr = false;
234 break;
235 case IP_VERSION(11, 0, 8):
236 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
237 psp_v11_0_8_set_psp_funcs(psp);
238 }
239 psp->autoload_supported = false;
240 psp->boot_time_tmr = false;
241 break;
242 case IP_VERSION(13, 0, 0):
243 case IP_VERSION(13, 0, 7):
244 case IP_VERSION(13, 0, 10):
245 psp_v13_0_set_psp_funcs(psp);
246 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
247 psp->boot_time_tmr = false;
248 break;
249 case IP_VERSION(13, 0, 4):
250 psp_v13_0_4_set_psp_funcs(psp);
251 psp->boot_time_tmr = false;
252 break;
253 case IP_VERSION(14, 0, 2):
254 case IP_VERSION(14, 0, 3):
255 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
256 psp_v14_0_set_psp_funcs(psp);
257 break;
258 case IP_VERSION(14, 0, 5):
259 psp_v14_0_set_psp_funcs(psp);
260 psp->boot_time_tmr = false;
261 break;
262 default:
263 return -EINVAL;
264 }
265
266 psp->adev = adev;
267
268 adev->psp_timeout = 20000;
269
270 psp_check_pmfw_centralized_cstate_management(psp);
271
272 if (amdgpu_sriov_vf(adev))
273 return psp_init_sriov_microcode(psp);
274 else
275 return psp_init_microcode(psp);
276 }
277
278 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
279 {
280 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
281 &mem_ctx->shared_buf);
282 mem_ctx->shared_bo = NULL;
283 }
284
285 static void psp_free_shared_bufs(struct psp_context *psp)
286 {
287 void *tmr_buf;
288 void **pptr;
289
290 /* free TMR memory buffer */
291 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
292 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
293 psp->tmr_bo = NULL;
294
295 /* free xgmi shared memory */
296 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
297
298 /* free ras shared memory */
299 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
300
301 /* free hdcp shared memory */
302 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
303
304 /* free dtm shared memory */
305 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
306
307 /* free rap shared memory */
308 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
309
310 /* free securedisplay shared memory */
311 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
312
313
314 }
315
316 static void psp_memory_training_fini(struct psp_context *psp)
317 {
318 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
319
320 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
321 kfree(ctx->sys_cache);
322 ctx->sys_cache = NULL;
323 }
324
325 static int psp_memory_training_init(struct psp_context *psp)
326 {
327 int ret;
328 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
329
330 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
331 dev_dbg(psp->adev->dev, "memory training is not supported!\n");
332 return 0;
333 }
334
335 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
336 if (ctx->sys_cache == NULL) {
337 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
338 ret = -ENOMEM;
339 goto Err_out;
340 }
341
342 dev_dbg(psp->adev->dev,
343 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
344 ctx->train_data_size,
345 ctx->p2c_train_data_offset,
346 ctx->c2p_train_data_offset);
347 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
348 return 0;
349
350 Err_out:
351 psp_memory_training_fini(psp);
352 return ret;
353 }
354
355 /*
356 * Helper function to query a psp runtime database entry
357 *
358 * @adev: amdgpu_device pointer
359 * @entry_type: the type of psp runtime database entry
360 * @db_entry: runtime database entry pointer
361 *
362 * Return false if the runtime database doesn't exist or the entry is invalid,
363 * or true if the specific database entry is found and copied to @db_entry
364 */
365 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
366 enum psp_runtime_entry_type entry_type,
367 void *db_entry)
368 {
369 uint64_t db_header_pos, db_dir_pos;
370 struct psp_runtime_data_header db_header = {0};
371 struct psp_runtime_data_directory db_dir = {0};
372 bool ret = false;
373 int i;
374
375 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
376 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
377 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
378 return false;
379
380 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
381 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
382
383 /* read runtime db header from vram */
384 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
385 sizeof(struct psp_runtime_data_header), false);
386
387 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
388 /* runtime db doesn't exist, exit */
389 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
390 return false;
391 }
392
393 /* read runtime database entry from vram */
394 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
395 sizeof(struct psp_runtime_data_directory), false);
396
397 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
398 /* invalid db entry count, exit */
399 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
400 return false;
401 }
402
403 /* look up for requested entry type */
404 for (i = 0; i < db_dir.entry_count && !ret; i++) {
405 if (db_dir.entry_list[i].entry_type == entry_type) {
406 switch (entry_type) {
407 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
408 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
409 /* invalid db entry size */
410 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
411 return false;
412 }
413 /* read runtime database entry */
414 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
415 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
416 ret = true;
417 break;
418 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
419 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
420 /* invalid db entry size */
421 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
422 return false;
423 }
424 /* read runtime database entry */
425 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
426 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
427 ret = true;
428 break;
429 default:
430 ret = false;
431 break;
432 }
433 }
434 }
435
436 return ret;
437 }
438
439 static int psp_sw_init(struct amdgpu_ip_block *ip_block)
440 {
441 struct amdgpu_device *adev = ip_block->adev;
442 struct psp_context *psp = &adev->psp;
443 int ret;
444 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
445 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
446 struct psp_runtime_scpm_entry scpm_entry;
447
448 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
449 if (!psp->cmd) {
450 dev_err(adev->dev, "Failed to allocate memory for command buffer!\n");
451 return -ENOMEM;
452 }
453
454 adev->psp.xgmi_context.supports_extended_data =
455 !adev->gmc.xgmi.connected_to_cpu &&
456 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
457
458 memset(&scpm_entry, 0, sizeof(scpm_entry));
459 if ((psp_get_runtime_db_entry(adev,
460 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
461 &scpm_entry)) &&
462 (scpm_entry.scpm_status != SCPM_DISABLE)) {
463 adev->scpm_enabled = true;
464 adev->scpm_status = scpm_entry.scpm_status;
465 } else {
466 adev->scpm_enabled = false;
467 adev->scpm_status = SCPM_DISABLE;
468 }
469
470 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
471
472 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
473 if (psp_get_runtime_db_entry(adev,
474 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
475 &boot_cfg_entry)) {
476 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
477 if ((psp->boot_cfg_bitmask) &
478 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
479 /* If psp runtime database exists, then
480 * only enable two stage memory training
481 * when TWO_STAGE_DRAM_TRAINING bit is set
482 * in runtime database
483 */
484 mem_training_ctx->enable_mem_training = true;
485 }
486
487 } else {
488 /* If psp runtime database doesn't exist or is
489 * invalid, force enable two stage memory training
490 */
491 mem_training_ctx->enable_mem_training = true;
492 }
493
494 if (mem_training_ctx->enable_mem_training) {
495 ret = psp_memory_training_init(psp);
496 if (ret) {
497 dev_err(adev->dev, "Failed to initialize memory training!\n");
498 return ret;
499 }
500
501 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
502 if (ret) {
503 dev_err(adev->dev, "Failed to process memory training!\n");
504 return ret;
505 }
506 }
507
508 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
509 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
510 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
511 &psp->fw_pri_bo,
512 &psp->fw_pri_mc_addr,
513 &psp->fw_pri_buf);
514 if (ret)
515 return ret;
516
517 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
518 AMDGPU_GEM_DOMAIN_VRAM |
519 AMDGPU_GEM_DOMAIN_GTT,
520 &psp->fence_buf_bo,
521 &psp->fence_buf_mc_addr,
522 &psp->fence_buf);
523 if (ret)
524 goto failed1;
525
526 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
527 AMDGPU_GEM_DOMAIN_VRAM |
528 AMDGPU_GEM_DOMAIN_GTT,
529 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
530 (void **)&psp->cmd_buf_mem);
531 if (ret)
532 goto failed2;
533
534 return 0;
535
536 failed2:
537 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
538 &psp->fence_buf_mc_addr, &psp->fence_buf);
539 failed1:
540 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
541 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
542 return ret;
543 }
544
545 static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
546 {
547 struct amdgpu_device *adev = ip_block->adev;
548 struct psp_context *psp = &adev->psp;
549
550 psp_memory_training_fini(psp);
551
552 amdgpu_ucode_release(&psp->sos_fw);
553 amdgpu_ucode_release(&psp->asd_fw);
554 amdgpu_ucode_release(&psp->ta_fw);
555 amdgpu_ucode_release(&psp->cap_fw);
556 amdgpu_ucode_release(&psp->toc_fw);
557
558 kfree(psp->cmd);
559 psp->cmd = NULL;
560
561 psp_free_shared_bufs(psp);
562
563 if (psp->km_ring.ring_mem)
564 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
565 &psp->km_ring.ring_mem_mc_addr,
566 (void **)&psp->km_ring.ring_mem);
567
568 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
569 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
570 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
571 &psp->fence_buf_mc_addr, &psp->fence_buf);
572 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
573 (void **)&psp->cmd_buf_mem);
574
575 return 0;
576 }
577
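/* Poll a PSP register until it reaches the expected state.
 * With PSP_WAITREG_CHANGED the wait ends as soon as the value differs from
 * @reg_val; otherwise it ends when (value & @mask) == @reg_val. Gives up and
 * returns -ETIME after adev->usec_timeout iterations of roughly 1us each,
 * e.g. psp_wait_for(psp, reg_index, expected, mask, 0) returns 0 once the
 * masked value matches.
 */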
578 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
579 uint32_t mask, uint32_t flags)
580 {
581 bool check_changed = flags & PSP_WAITREG_CHANGED;
582 bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
583 uint32_t val;
584 int i;
585 struct amdgpu_device *adev = psp->adev;
586
587 if (psp->adev->no_hw_access)
588 return 0;
589
590 for (i = 0; i < adev->usec_timeout; i++) {
591 val = RREG32(reg_index);
592 if (check_changed) {
593 if (val != reg_val)
594 return 0;
595 } else {
596 if ((val & mask) == reg_val)
597 return 0;
598 }
599 udelay(1);
600 }
601
602 if (verbose)
603 dev_err(adev->dev,
604 "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
605 reg_index, mask, val, reg_val);
606
607 return -ETIME;
608 }
609
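/* Same polling loop as psp_wait_for(), but with a millisecond-granularity
 * timeout for the long-running SPI ROM update path.
 */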
610 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
611 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
612 {
613 uint32_t val;
614 int i;
615 struct amdgpu_device *adev = psp->adev;
616
617 if (psp->adev->no_hw_access)
618 return 0;
619
620 for (i = 0; i < msec_timeout; i++) {
621 val = RREG32(reg_index);
622 if ((val & mask) == reg_val)
623 return 0;
624 msleep(1);
625 }
626
627 return -ETIME;
628 }
629
630 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
631 {
632 switch (cmd_id) {
633 case GFX_CMD_ID_LOAD_TA:
634 return "LOAD_TA";
635 case GFX_CMD_ID_UNLOAD_TA:
636 return "UNLOAD_TA";
637 case GFX_CMD_ID_INVOKE_CMD:
638 return "INVOKE_CMD";
639 case GFX_CMD_ID_LOAD_ASD:
640 return "LOAD_ASD";
641 case GFX_CMD_ID_SETUP_TMR:
642 return "SETUP_TMR";
643 case GFX_CMD_ID_LOAD_IP_FW:
644 return "LOAD_IP_FW";
645 case GFX_CMD_ID_DESTROY_TMR:
646 return "DESTROY_TMR";
647 case GFX_CMD_ID_SAVE_RESTORE:
648 return "SAVE_RESTORE_IP_FW";
649 case GFX_CMD_ID_SETUP_VMR:
650 return "SETUP_VMR";
651 case GFX_CMD_ID_DESTROY_VMR:
652 return "DESTROY_VMR";
653 case GFX_CMD_ID_PROG_REG:
654 return "PROG_REG";
655 case GFX_CMD_ID_GET_FW_ATTESTATION:
656 return "GET_FW_ATTESTATION";
657 case GFX_CMD_ID_LOAD_TOC:
658 return "ID_LOAD_TOC";
659 case GFX_CMD_ID_AUTOLOAD_RLC:
660 return "AUTOLOAD_RLC";
661 case GFX_CMD_ID_BOOT_CFG:
662 return "BOOT_CFG";
663 case GFX_CMD_ID_CONFIG_SQ_PERFMON:
664 return "CONFIG_SQ_PERFMON";
665 case GFX_CMD_ID_FB_FW_RESERV_ADDR:
666 return "FB_FW_RESERV_ADDR";
667 case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
668 return "FB_FW_RESERV_EXT_ADDR";
669 case GFX_CMD_ID_SRIOV_SPATIAL_PART:
670 return "SPATIAL_PARTITION";
671 case GFX_CMD_ID_FB_NPS_MODE:
672 return "NPS_MODE_CHANGE";
673 default:
674 return "UNKNOWN CMD";
675 }
676 }
677
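/* Decide whether a failed GFX command deserves a warning: a TEE_ERROR_CANCEL
 * response to a REG_LIST load on MP0 13.0.2 only means the register list is
 * already loaded, so it is not reported.
 */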
678 static bool psp_err_warn(struct psp_context *psp)
679 {
680 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
681
682 /* This response indicates reg list is already loaded */
683 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
684 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
685 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
686 cmd->resp.status == TEE_ERROR_CANCEL)
687 return false;
688
689 return true;
690 }
691
692 static int
693 psp_cmd_submit_buf(struct psp_context *psp,
694 struct amdgpu_firmware_info *ucode,
695 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
696 {
697 int ret;
698 int index;
699 int timeout = psp->adev->psp_timeout;
700 bool ras_intr = false;
701 bool skip_unsupport = false;
702
703 if (psp->adev->no_hw_access)
704 return 0;
705
706 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
707
708 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
709
710 index = atomic_inc_return(&psp->fence_value);
711 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
712 if (ret) {
713 atomic_dec(&psp->fence_value);
714 goto exit;
715 }
716
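/* Poll the fence buffer until PSP writes back the fence value of this
 * submission; invalidate HDP each iteration so the CPU sees the latest
 * write-back from PSP.
 */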
717 amdgpu_device_invalidate_hdp(psp->adev, NULL);
718 while (*((unsigned int *)psp->fence_buf) != index) {
719 if (--timeout == 0)
720 break;
721 /*
722 * Don't wait for the timeout when err_event_athub occurs, because
723 * the gpu reset thread has been triggered and held resources should
724 * be released for the psp resume sequence.
725 */
726 ras_intr = amdgpu_ras_intr_triggered();
727 if (ras_intr)
728 break;
729 usleep_range(10, 100);
730 amdgpu_device_invalidate_hdp(psp->adev, NULL);
731 }
732
733 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
734 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
735 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
736
737 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
738
739 /* In some cases, the psp response status is not 0 even though there is
740 * no problem with the submitted command. Some versions of PSP FW
741 * don't write 0 to that field.
742 * So here we only print a warning instead of an error during psp
743 * initialization to avoid breaking hw_init, and we don't return
744 * -EINVAL in that case.
745 */
746 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
747 if (ucode)
748 dev_warn(psp->adev->dev,
749 "failed to load ucode %s(0x%X) ",
750 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
751 if (psp_err_warn(psp))
752 dev_warn(
753 psp->adev->dev,
754 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
755 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
756 psp->cmd_buf_mem->cmd_id,
757 psp->cmd_buf_mem->resp.status);
758 /* If any firmware (including CAP) load fails under SRIOV, it should
759 * return failure to stop the VF from initializing.
760 * Also return failure in case of timeout
761 */
762 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
763 ret = -EINVAL;
764 goto exit;
765 }
766 }
767
768 if (ucode) {
769 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
770 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
771 }
772
773 exit:
774 return ret;
775 }
776
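/* The single psp->cmd buffer is shared by all GFX command submissions, so it
 * is handed out under psp->mutex and zeroed on acquire. Typical usage (sketch
 * of the pattern used throughout this file):
 *
 *	cmd = acquire_psp_cmd_buf(psp);
 *	cmd->cmd_id = ...;
 *	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 *	release_psp_cmd_buf(psp);
 */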
777 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
778 {
779 struct psp_gfx_cmd_resp *cmd = psp->cmd;
780
781 mutex_lock(&psp->mutex);
782
783 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
784
785 return cmd;
786 }
787
788 static void release_psp_cmd_buf(struct psp_context *psp)
789 {
790 mutex_unlock(&psp->mutex);
791 }
792
793 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
794 struct psp_gfx_cmd_resp *cmd,
795 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
796 {
797 struct amdgpu_device *adev = psp->adev;
798 uint32_t size = 0;
799 uint64_t tmr_pa = 0;
800
801 if (tmr_bo) {
802 size = amdgpu_bo_size(tmr_bo);
803 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
804 }
805
806 if (amdgpu_sriov_vf(psp->adev))
807 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
808 else
809 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
810 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
811 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
812 cmd->cmd.cmd_setup_tmr.buf_size = size;
813 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
814 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
815 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
816 }
817
818 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
819 uint64_t pri_buf_mc, uint32_t size)
820 {
821 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
822 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
823 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
824 cmd->cmd.cmd_load_toc.toc_size = size;
825 }
826
827 /* Issue LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
828 static int psp_load_toc(struct psp_context *psp,
829 uint32_t *tmr_size)
830 {
831 int ret;
832 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
833
834 /* Copy toc to psp firmware private buffer */
835 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
836
837 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
838
839 ret = psp_cmd_submit_buf(psp, NULL, cmd,
840 psp->fence_buf_mc_addr);
841 if (!ret)
842 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
843
844 release_psp_cmd_buf(psp);
845
846 return ret;
847 }
848
849 /* Set up Trusted Memory Region */
850 static int psp_tmr_init(struct psp_context *psp)
851 {
852 int ret = 0;
853 int tmr_size;
854 void *tmr_buf;
855 void **pptr;
856
857 /*
858 * According to the HW engineers, they prefer the TMR address to be
859 * "naturally aligned", e.g. the start address is an integer multiple
860 * of the TMR size.
861 *
862 * Note: this memory needs to stay reserved until the driver is unloaded.
863 */
864 tmr_size = PSP_TMR_SIZE(psp->adev);
865
866 /* For ASICs that support RLC autoload, psp will parse the toc
867 * and calculate the total size of TMR needed
868 */
869 if (!amdgpu_sriov_vf(psp->adev) &&
870 psp->toc.start_addr &&
871 psp->toc.size_bytes &&
872 psp->fw_pri_buf) {
873 ret = psp_load_toc(psp, &tmr_size);
874 if (ret) {
875 dev_err(psp->adev->dev, "Failed to load toc\n");
876 return ret;
877 }
878 }
879
880 if (!psp->tmr_bo && !psp->boot_time_tmr) {
881 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
882 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
883 PSP_TMR_ALIGNMENT,
884 AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
885 &psp->tmr_bo, &psp->tmr_mc_addr,
886 pptr);
887 }
888 if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
889 psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
890
891 return ret;
892 }
893
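/* ASICs for which the TMR is already managed by the host under SRIOV, so the
 * guest driver skips TMR setup and teardown (see psp_tmr_load/unload).
 */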
894 static bool psp_skip_tmr(struct psp_context *psp)
895 {
896 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
897 case IP_VERSION(11, 0, 9):
898 case IP_VERSION(11, 0, 7):
899 case IP_VERSION(13, 0, 2):
900 case IP_VERSION(13, 0, 6):
901 case IP_VERSION(13, 0, 10):
902 case IP_VERSION(13, 0, 12):
903 case IP_VERSION(13, 0, 14):
904 return true;
905 default:
906 return false;
907 }
908 }
909
910 static int psp_tmr_load(struct psp_context *psp)
911 {
912 int ret;
913 struct psp_gfx_cmd_resp *cmd;
914
915 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
916 * Already set up by host driver.
917 */
918 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
919 return 0;
920
921 cmd = acquire_psp_cmd_buf(psp);
922
923 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
924 if (psp->tmr_bo)
925 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
926 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
927
928 ret = psp_cmd_submit_buf(psp, NULL, cmd,
929 psp->fence_buf_mc_addr);
930
931 release_psp_cmd_buf(psp);
932
933 return ret;
934 }
935
936 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
937 struct psp_gfx_cmd_resp *cmd)
938 {
939 if (amdgpu_sriov_vf(psp->adev))
940 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
941 else
942 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
943 }
944
945 static int psp_tmr_unload(struct psp_context *psp)
946 {
947 int ret;
948 struct psp_gfx_cmd_resp *cmd;
949
950 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
951 * as TMR is not loaded at all
952 */
953 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
954 return 0;
955
956 cmd = acquire_psp_cmd_buf(psp);
957
958 psp_prep_tmr_unload_cmd_buf(psp, cmd);
959 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
960
961 ret = psp_cmd_submit_buf(psp, NULL, cmd,
962 psp->fence_buf_mc_addr);
963
964 release_psp_cmd_buf(psp);
965
966 return ret;
967 }
968
969 static int psp_tmr_terminate(struct psp_context *psp)
970 {
971 return psp_tmr_unload(psp);
972 }
973
974 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
975 uint64_t *output_ptr)
976 {
977 int ret;
978 struct psp_gfx_cmd_resp *cmd;
979
980 if (!output_ptr)
981 return -EINVAL;
982
983 if (amdgpu_sriov_vf(psp->adev))
984 return 0;
985
986 cmd = acquire_psp_cmd_buf(psp);
987
988 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
989
990 ret = psp_cmd_submit_buf(psp, NULL, cmd,
991 psp->fence_buf_mc_addr);
992
993 if (!ret) {
994 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
995 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
996 }
997
998 release_psp_cmd_buf(psp);
999
1000 return ret;
1001 }
1002
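/* Query one of the PSP firmware-reserved framebuffer regions. A
 * PSP_ERR_UNKNOWN_COMMAND response is treated as "no region" and reported
 * back as address/size 0 rather than as an error.
 */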
1003 static int psp_get_fw_reservation_info(struct psp_context *psp,
1004 uint32_t cmd_id,
1005 uint64_t *addr,
1006 uint32_t *size)
1007 {
1008 int ret;
1009 uint32_t status;
1010 struct psp_gfx_cmd_resp *cmd;
1011
1012 cmd = acquire_psp_cmd_buf(psp);
1013
1014 cmd->cmd_id = cmd_id;
1015
1016 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1017 psp->fence_buf_mc_addr);
1018 if (ret) {
1019 release_psp_cmd_buf(psp);
1020 return ret;
1021 }
1022
1023 status = cmd->resp.status;
1024 if (status == PSP_ERR_UNKNOWN_COMMAND) {
1025 release_psp_cmd_buf(psp);
1026 *addr = 0;
1027 *size = 0;
1028 return 0;
1029 }
1030
1031 *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
1032 cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
1033 *size = cmd->resp.uresp.fw_reserve_info.reserve_size;
1034
1035 release_psp_cmd_buf(psp);
1036
1037 return 0;
1038 }
1039
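/* Re-reserve the firmware regions reported by PSP (base and extended) as
 * kernel BOs, rounded up to 1 MiB, so that VRAM in use by firmware is never
 * handed out by the driver. Only applies to bare-metal MP0 14.0.2/14.0.3
 * with sufficiently new SOS firmware.
 */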
1040 int psp_update_fw_reservation(struct psp_context *psp)
1041 {
1042 int ret;
1043 uint64_t reserv_addr, reserv_addr_ext;
1044 uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
1045 struct amdgpu_device *adev = psp->adev;
1046
1047 mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
1048
1049 if (amdgpu_sriov_vf(psp->adev))
1050 return 0;
1051
1052 switch (mp0_ip_ver) {
1053 case IP_VERSION(14, 0, 2):
1054 if (adev->psp.sos.fw_version < 0x3b0e0d)
1055 return 0;
1056 break;
1057
1058 case IP_VERSION(14, 0, 3):
1059 if (adev->psp.sos.fw_version < 0x3a0e14)
1060 return 0;
1061 break;
1062
1063 default:
1064 return 0;
1065 }
1066
1067 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
1068 if (ret)
1069 return ret;
1070 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
1071 if (ret)
1072 return ret;
1073
1074 if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
1075 dev_warn(adev->dev, "reserve fw region is not valid!\n");
1076 return 0;
1077 }
1078
1079 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1080
1081 reserv_size = roundup(reserv_size, SZ_1M);
1082
1083 ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
1084 if (ret) {
1085 dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
1086 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
1087 return ret;
1088 }
1089
1090 reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
1091
1092 ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
1093 &adev->mman.fw_reserved_memory_extend, NULL);
1094 if (ret) {
1095 dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
1096 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
1097 return ret;
1098 }
1099
1100 return 0;
1101 }
1102
1103 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
1104 {
1105 struct psp_context *psp = &adev->psp;
1106 struct psp_gfx_cmd_resp *cmd;
1107 int ret;
1108
1109 if (amdgpu_sriov_vf(adev))
1110 return 0;
1111
1112 cmd = acquire_psp_cmd_buf(psp);
1113
1114 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1115 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
1116
1117 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1118 if (!ret) {
1119 *boot_cfg =
1120 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1121 }
1122
1123 release_psp_cmd_buf(psp);
1124
1125 return ret;
1126 }
1127
1128 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1129 {
1130 int ret;
1131 struct psp_context *psp = &adev->psp;
1132 struct psp_gfx_cmd_resp *cmd;
1133
1134 if (amdgpu_sriov_vf(adev))
1135 return 0;
1136
1137 cmd = acquire_psp_cmd_buf(psp);
1138
1139 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1140 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1141 cmd->cmd.boot_cfg.boot_config = boot_cfg;
1142 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1143
1144 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1145
1146 release_psp_cmd_buf(psp);
1147
1148 return ret;
1149 }
1150
1151 static int psp_rl_load(struct amdgpu_device *adev)
1152 {
1153 int ret;
1154 struct psp_context *psp = &adev->psp;
1155 struct psp_gfx_cmd_resp *cmd;
1156
1157 if (!is_psp_fw_valid(psp->rl))
1158 return 0;
1159
1160 cmd = acquire_psp_cmd_buf(psp);
1161
1162 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1163 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1164
1165 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1166 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1167 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1168 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1169 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1170
1171 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1172
1173 release_psp_cmd_buf(psp);
1174
1175 return ret;
1176 }
1177
1178 int psp_memory_partition(struct psp_context *psp, int mode)
1179 {
1180 struct psp_gfx_cmd_resp *cmd;
1181 int ret;
1182
1183 if (amdgpu_sriov_vf(psp->adev))
1184 return 0;
1185
1186 cmd = acquire_psp_cmd_buf(psp);
1187
1188 cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1189 cmd->cmd.cmd_memory_part.mode = mode;
1190
1191 dev_info(psp->adev->dev,
1192 "Requesting %d memory partition change through PSP", mode);
1193 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1194 if (ret)
1195 dev_err(psp->adev->dev,
1196 "PSP request failed to change to NPS%d mode\n", mode);
1197
1198 release_psp_cmd_buf(psp);
1199
1200 return ret;
1201 }
1202
1203 int psp_spatial_partition(struct psp_context *psp, int mode)
1204 {
1205 struct psp_gfx_cmd_resp *cmd;
1206 int ret;
1207
1208 if (amdgpu_sriov_vf(psp->adev))
1209 return 0;
1210
1211 cmd = acquire_psp_cmd_buf(psp);
1212
1213 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1214 cmd->cmd.cmd_spatial_part.mode = mode;
1215
1216 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1217 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1218
1219 release_psp_cmd_buf(psp);
1220
1221 return ret;
1222 }
1223
1224 static int psp_asd_initialize(struct psp_context *psp)
1225 {
1226 int ret;
1227
1228 /* If the PSP version doesn't match the ASD version, ASD loading will fail.
1229 * Add a workaround to bypass it for sriov for now.
1230 * TODO: add version check to make it common
1231 */
1232 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1233 return 0;
1234
1235 /* bypass asd if display hardware is not available */
1236 if (!amdgpu_device_has_display_hardware(psp->adev) &&
1237 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1238 return 0;
1239
1240 psp->asd_context.mem_context.shared_mc_addr = 0;
1241 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1242 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1243
1244 ret = psp_ta_load(psp, &psp->asd_context);
1245 if (!ret)
1246 psp->asd_context.initialized = true;
1247
1248 return ret;
1249 }
1250
1251 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1252 uint32_t session_id)
1253 {
1254 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1255 cmd->cmd.cmd_unload_ta.session_id = session_id;
1256 }
1257
1258 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1259 {
1260 int ret;
1261 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1262
1263 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1264
1265 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1266
1267 context->resp_status = cmd->resp.status;
1268
1269 release_psp_cmd_buf(psp);
1270
1271 return ret;
1272 }
1273
1274 static int psp_asd_terminate(struct psp_context *psp)
1275 {
1276 int ret;
1277
1278 if (amdgpu_sriov_vf(psp->adev))
1279 return 0;
1280
1281 if (!psp->asd_context.initialized)
1282 return 0;
1283
1284 ret = psp_ta_unload(psp, &psp->asd_context);
1285 if (!ret)
1286 psp->asd_context.initialized = false;
1287
1288 return ret;
1289 }
1290
1291 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1292 uint32_t id, uint32_t value)
1293 {
1294 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1295 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1296 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1297 }
1298
1299 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1300 uint32_t value)
1301 {
1302 struct psp_gfx_cmd_resp *cmd;
1303 int ret = 0;
1304
1305 if (reg >= PSP_REG_LAST)
1306 return -EINVAL;
1307
1308 cmd = acquire_psp_cmd_buf(psp);
1309
1310 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1311 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1312 if (ret)
1313 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1314
1315 release_psp_cmd_buf(psp);
1316
1317 return ret;
1318 }
1319
1320 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1321 uint64_t ta_bin_mc,
1322 struct ta_context *context)
1323 {
1324 cmd->cmd_id = context->ta_load_type;
1325 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1326 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1327 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1328
1329 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1330 lower_32_bits(context->mem_context.shared_mc_addr);
1331 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1332 upper_32_bits(context->mem_context.shared_mc_addr);
1333 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1334 }
1335
1336 int psp_ta_init_shared_buf(struct psp_context *psp,
1337 struct ta_mem_context *mem_ctx)
1338 {
1339 /*
1340 * Allocate 16k of memory aligned to 4k from the Frame Buffer (local
1341 * physical) for the ta <-> host shared memory
1342 */
1343 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1344 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1345 AMDGPU_GEM_DOMAIN_GTT,
1346 &mem_ctx->shared_bo,
1347 &mem_ctx->shared_mc_addr,
1348 &mem_ctx->shared_buf);
1349 }
1350
1351 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1352 uint32_t ta_cmd_id,
1353 uint32_t session_id)
1354 {
1355 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1356 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1357 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1358 }
1359
1360 int psp_ta_invoke(struct psp_context *psp,
1361 uint32_t ta_cmd_id,
1362 struct ta_context *context)
1363 {
1364 int ret;
1365 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1366
1367 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1368
1369 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1370 psp->fence_buf_mc_addr);
1371
1372 context->resp_status = cmd->resp.status;
1373
1374 release_psp_cmd_buf(psp);
1375
1376 return ret;
1377 }
1378
1379 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1380 {
1381 int ret;
1382 struct psp_gfx_cmd_resp *cmd;
1383
1384 cmd = acquire_psp_cmd_buf(psp);
1385
1386 psp_copy_fw(psp, context->bin_desc.start_addr,
1387 context->bin_desc.size_bytes);
1388
1389 if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
1390 context->mem_context.shared_bo)
1391 context->mem_context.shared_mc_addr =
1392 amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
1393
1394 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1395
1396 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1397 psp->fence_buf_mc_addr);
1398
1399 context->resp_status = cmd->resp.status;
1400
1401 if (!ret)
1402 context->session_id = cmd->resp.session_id;
1403
1404 release_psp_cmd_buf(psp);
1405
1406 return ret;
1407 }
1408
1409 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1410 {
1411 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1412 }
1413
1414 int psp_xgmi_terminate(struct psp_context *psp)
1415 {
1416 int ret;
1417 struct amdgpu_device *adev = psp->adev;
1418
1419 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1420 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1421 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1422 adev->gmc.xgmi.connected_to_cpu))
1423 return 0;
1424
1425 if (!psp->xgmi_context.context.initialized)
1426 return 0;
1427
1428 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1429
1430 psp->xgmi_context.context.initialized = false;
1431
1432 return ret;
1433 }
1434
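/* Load the XGMI TA (when @load_ta is set), allocate its shared buffer if
 * needed, then issue TA_COMMAND_XGMI__INITIALIZE and record the TA capability
 * flags for later feature checks.
 */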
1435 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1436 {
1437 struct ta_xgmi_shared_memory *xgmi_cmd;
1438 int ret;
1439
1440 if (!psp->ta_fw ||
1441 !psp->xgmi_context.context.bin_desc.size_bytes ||
1442 !psp->xgmi_context.context.bin_desc.start_addr)
1443 return -ENOENT;
1444
1445 if (!load_ta)
1446 goto invoke;
1447
1448 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1449 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1450
1451 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1452 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1453 if (ret)
1454 return ret;
1455 }
1456
1457 /* Load XGMI TA */
1458 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1459 if (!ret)
1460 psp->xgmi_context.context.initialized = true;
1461 else
1462 return ret;
1463
1464 invoke:
1465 /* Initialize XGMI session */
1466 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1467 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1468 xgmi_cmd->flag_extend_link_record = set_extended_data;
1469 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1470
1471 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1472 /* note down the capability flag for XGMI TA */
1473 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1474
1475 return ret;
1476 }
1477
1478 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1479 {
1480 struct ta_xgmi_shared_memory *xgmi_cmd;
1481 int ret;
1482
1483 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1484 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1485
1486 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1487
1488 /* Invoke xgmi ta to get hive id */
1489 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1490 if (ret)
1491 return ret;
1492
1493 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1494
1495 return 0;
1496 }
1497
1498 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1499 {
1500 struct ta_xgmi_shared_memory *xgmi_cmd;
1501 int ret;
1502
1503 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1504 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1505
1506 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1507
1508 /* Invoke xgmi ta to get the node id */
1509 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1510 if (ret)
1511 return ret;
1512
1513 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1514
1515 return 0;
1516 }
1517
1518 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1519 {
1520 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1521 IP_VERSION(13, 0, 2) &&
1522 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1523 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1524 IP_VERSION(13, 0, 6);
1525 }
1526
1527 /*
1528 * Chips that support extended topology information require the driver to
1529 * reflect topology information in the opposite direction. This is
1530 * because the TA has already exceeded its link record limit and if the
1531 * TA holds bi-directional information, the driver would have to do
1532 * multiple fetches instead of just two.
1533 */
1534 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1535 struct psp_xgmi_node_info node_info)
1536 {
1537 struct amdgpu_device *mirror_adev;
1538 struct amdgpu_hive_info *hive;
1539 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1540 uint64_t dst_node_id = node_info.node_id;
1541 uint8_t dst_num_hops = node_info.num_hops;
1542 uint8_t dst_num_links = node_info.num_links;
1543
1544 hive = amdgpu_get_xgmi_hive(psp->adev);
1545 if (WARN_ON(!hive))
1546 return;
1547
1548 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1549 struct psp_xgmi_topology_info *mirror_top_info;
1550 int j;
1551
1552 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1553 continue;
1554
1555 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1556 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1557 if (mirror_top_info->nodes[j].node_id != src_node_id)
1558 continue;
1559
1560 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1561 /*
1562 * prevent re-reflection of a 0 num_links value, since the reflection
1563 * criterion is based on num_hops (direct or indirect).
1565 */
1566 if (dst_num_links)
1567 mirror_top_info->nodes[j].num_links = dst_num_links;
1568
1569 break;
1570 }
1571
1572 break;
1573 }
1574
1575 amdgpu_put_xgmi_hive(hive);
1576 }
1577
1578 int psp_xgmi_get_topology_info(struct psp_context *psp,
1579 int number_devices,
1580 struct psp_xgmi_topology_info *topology,
1581 bool get_extended_data)
1582 {
1583 struct ta_xgmi_shared_memory *xgmi_cmd;
1584 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1585 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1586 int i;
1587 int ret;
1588
1589 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1590 return -EINVAL;
1591
1592 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1593 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1594 xgmi_cmd->flag_extend_link_record = get_extended_data;
1595
1596 /* Fill in the shared memory with topology information as input */
1597 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1598 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1599 topology_info_input->num_nodes = number_devices;
1600
1601 for (i = 0; i < topology_info_input->num_nodes; i++) {
1602 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1603 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1604 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1605 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1606 }
1607
1608 /* Invoke xgmi ta to get the topology information */
1609 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1610 if (ret)
1611 return ret;
1612
1613 /* Read the output topology information from the shared memory */
1614 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1615 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1616 for (i = 0; i < topology->num_nodes; i++) {
1617 /* extended data will either be 0 or equal to non-extended data */
1618 if (topology_info_output->nodes[i].num_hops)
1619 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1620
1621 /* non-extended data gets everything here so no need to update */
1622 if (!get_extended_data) {
1623 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1624 topology->nodes[i].is_sharing_enabled =
1625 topology_info_output->nodes[i].is_sharing_enabled;
1626 topology->nodes[i].sdma_engine =
1627 topology_info_output->nodes[i].sdma_engine;
1628 }
1629
1630 }
1631
1632 /* Invoke xgmi ta again to get the link information */
1633 if (psp_xgmi_peer_link_info_supported(psp)) {
1634 struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1635 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1636 bool requires_reflection =
1637 (psp->xgmi_context.supports_extended_data &&
1638 get_extended_data) ||
1639 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1640 IP_VERSION(13, 0, 6) ||
1641 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1642 IP_VERSION(13, 0, 14);
1643 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1644 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1645
1646 /* populate the shared output buffer rather than the cmd input buffer
1647 * with node_ids as the input for GET_PEER_LINKS command execution.
1648 * This is required for GET_PEER_LINKS per xgmi ta implementation.
1649 * The same requirement for GET_EXTEND_PEER_LINKS command.
1650 */
1651 if (ta_port_num_support) {
1652 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1653
1654 for (i = 0; i < topology->num_nodes; i++)
1655 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1656
1657 link_extend_info_output->num_nodes = topology->num_nodes;
1658 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1659 } else {
1660 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1661
1662 for (i = 0; i < topology->num_nodes; i++)
1663 link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1664
1665 link_info_output->num_nodes = topology->num_nodes;
1666 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1667 }
1668
1669 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1670 if (ret)
1671 return ret;
1672
1673 for (i = 0; i < topology->num_nodes; i++) {
1674 uint8_t node_num_links = ta_port_num_support ?
1675 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1676 /* accumulate num_links on extended data */
1677 if (get_extended_data) {
1678 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1679 } else {
1680 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1681 topology->nodes[i].num_links : node_num_links;
1682 }
1683 /* populate the connected port num info if supported and available */
1684 if (ta_port_num_support && topology->nodes[i].num_links) {
1685 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1686 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1687 }
1688
1689 /* reflect the topology information for bi-directionality */
1690 if (requires_reflection && topology->nodes[i].num_hops)
1691 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1692 }
1693 }
1694
1695 return 0;
1696 }
1697
1698 int psp_xgmi_set_topology_info(struct psp_context *psp,
1699 int number_devices,
1700 struct psp_xgmi_topology_info *topology)
1701 {
1702 struct ta_xgmi_shared_memory *xgmi_cmd;
1703 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1704 int i;
1705
1706 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1707 return -EINVAL;
1708
1709 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1710 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1711
1712 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1713 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1714 topology_info_input->num_nodes = number_devices;
1715
1716 for (i = 0; i < topology_info_input->num_nodes; i++) {
1717 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1718 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1719 topology_info_input->nodes[i].is_sharing_enabled = 1;
1720 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1721 }
1722
1723 /* Invoke xgmi ta to set topology information */
1724 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1725 }
1726
1727 // ras begin
1728 static void psp_ras_ta_check_status(struct psp_context *psp)
1729 {
1730 struct ta_ras_shared_memory *ras_cmd =
1731 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1732
1733 switch (ras_cmd->ras_status) {
1734 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1735 dev_warn(psp->adev->dev,
1736 "RAS WARNING: cmd failed due to unsupported ip\n");
1737 break;
1738 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1739 dev_warn(psp->adev->dev,
1740 "RAS WARNING: cmd failed due to unsupported error injection\n");
1741 break;
1742 case TA_RAS_STATUS__SUCCESS:
1743 break;
1744 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1745 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1746 dev_warn(psp->adev->dev,
1747 "RAS WARNING: Inject error to critical region is not allowed\n");
1748 break;
1749 default:
1750 dev_warn(psp->adev->dev,
1751 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1752 break;
1753 }
1754 }
1755
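/*
 * Marshal a RAS TA command into the shared buffer, invoke the TA and, for
 * commands that produce output (TRIGGER_ERROR, QUERY_ADDRESS), copy the
 * result back to the caller. Access to the shared buffer is serialized by
 * ras_context.mutex.
 */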
1756 static int psp_ras_send_cmd(struct psp_context *psp,
1757 enum ras_command cmd_id, void *in, void *out)
1758 {
1759 struct ta_ras_shared_memory *ras_cmd;
1760 uint32_t cmd = cmd_id;
1761 int ret = 0;
1762
1763 if (!in)
1764 return -EINVAL;
1765
1766 mutex_lock(&psp->ras_context.mutex);
1767 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1768 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1769
1770 switch (cmd) {
1771 case TA_RAS_COMMAND__ENABLE_FEATURES:
1772 case TA_RAS_COMMAND__DISABLE_FEATURES:
1773 memcpy(&ras_cmd->ras_in_message,
1774 in, sizeof(ras_cmd->ras_in_message));
1775 break;
1776 case TA_RAS_COMMAND__TRIGGER_ERROR:
1777 memcpy(&ras_cmd->ras_in_message.trigger_error,
1778 in, sizeof(ras_cmd->ras_in_message.trigger_error));
1779 break;
1780 case TA_RAS_COMMAND__QUERY_ADDRESS:
1781 memcpy(&ras_cmd->ras_in_message.address,
1782 in, sizeof(ras_cmd->ras_in_message.address));
1783 break;
1784 default:
1785 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1786 ret = -EINVAL;
1787 goto err_out;
1788 }
1789
1790 ras_cmd->cmd_id = cmd;
1791 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1792
1793 switch (cmd) {
1794 case TA_RAS_COMMAND__TRIGGER_ERROR:
1795 if (!ret && out)
1796 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1797 break;
1798 case TA_RAS_COMMAND__QUERY_ADDRESS:
1799 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1800 ret = -EINVAL;
1801 else if (out)
1802 memcpy(out,
1803 &ras_cmd->ras_out_message.address,
1804 sizeof(ras_cmd->ras_out_message.address));
1805 break;
1806 default:
1807 break;
1808 }
1809
1810 err_out:
1811 mutex_unlock(&psp->ras_context.mutex);
1812
1813 return ret;
1814 }
1815
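/*
 * Invoke a command that has already been staged in the RAS TA shared buffer.
 * Bypassed under SRIOV; on return, the TA interface version and the
 * error-inject/register-access flags are checked, and the TA status is
 * reported via psp_ras_ta_check_status().
 */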
1816 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1817 {
1818 struct ta_ras_shared_memory *ras_cmd;
1819 int ret;
1820
1821 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1822
1823 /*
1824 * TODO: bypass the loading in sriov for now
1825 */
1826 if (amdgpu_sriov_vf(psp->adev))
1827 return 0;
1828
1829 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1830
1831 if (amdgpu_ras_intr_triggered())
1832 return ret;
1833
1834 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1835 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1836 return -EINVAL;
1837 }
1838
1839 if (!ret) {
1840 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1841 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1842
1843 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1844 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1845 dev_warn(psp->adev->dev,
1846 "RAS internal register access blocked\n");
1847
1848 psp_ras_ta_check_status(psp);
1849 }
1850
1851 return ret;
1852 }
1853
1854 int psp_ras_enable_features(struct psp_context *psp,
1855 union ta_ras_cmd_input *info, bool enable)
1856 {
1857 enum ras_command cmd_id;
1858 int ret;
1859
1860 if (!psp->ras_context.context.initialized || !info)
1861 return -EINVAL;
1862
1863 cmd_id = enable ?
1864 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1865 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1866 if (ret)
1867 return -EINVAL;
1868
1869 return 0;
1870 }
1871
1872 int psp_ras_terminate(struct psp_context *psp)
1873 {
1874 int ret;
1875
1876 /*
1877 * TODO: bypass the terminate in sriov for now
1878 */
1879 if (amdgpu_sriov_vf(psp->adev))
1880 return 0;
1881
1882 if (!psp->ras_context.context.initialized)
1883 return 0;
1884
1885 ret = psp_ta_unload(psp, &psp->ras_context.context);
1886
1887 psp->ras_context.context.initialized = false;
1888
1889 mutex_destroy(&psp->ras_context.mutex);
1890
1891 return ret;
1892 }
1893
1894 int psp_ras_initialize(struct psp_context *psp)
1895 {
1896 int ret;
1897 uint32_t boot_cfg = 0xFF;
1898 struct amdgpu_device *adev = psp->adev;
1899 struct ta_ras_shared_memory *ras_cmd;
1900
1901 /*
1902 * TODO: bypass the initialize in sriov for now
1903 */
1904 if (amdgpu_sriov_vf(adev))
1905 return 0;
1906
1907 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1908 !adev->psp.ras_context.context.bin_desc.start_addr) {
1909 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1910 return 0;
1911 }
1912
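	/*
	 * GECC handling summary: read the current boot config and reconcile
	 * it with the requested RAS/ECC policy. If GECC is on but was not
	 * asked for, only warn; if it is requested (default ECC enabled or
	 * amdgpu_ras_enable=1) but off, program it for the next boot cycle;
	 * if it is neither requested nor enabled, just report that; and if
	 * it is on while RAS is disabled, clear it for the next boot cycle.
	 */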
1913 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1914 /* query GECC enablement status from boot config
1915 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1916 */
1917 ret = psp_boot_config_get(adev, &boot_cfg);
1918 if (ret)
1919 dev_warn(adev->dev, "PSP get boot config failed\n");
1920
1921 if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
1922 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1923 dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
1924 dev_warn(adev->dev,
1925 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
1926 } else {
1927 if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
1928 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1929 if (boot_cfg == 1) {
1930 dev_info(adev->dev, "GECC is enabled\n");
1931 } else {
1932 					/* enable GECC in the next boot cycle if it is disabled
1933 					 * in the boot config, or force enable GECC if we failed
1934 					 * to get the boot configuration
1935 */
1936 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1937 if (ret)
1938 dev_warn(adev->dev, "PSP set boot config failed\n");
1939 else
1940 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1941 }
1942 } else {
1943 if (!boot_cfg) {
1944 if (!adev->ras_default_ecc_enabled &&
1945 amdgpu_ras_enable != 1 &&
1946 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1947 dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
1948 else
1949 dev_info(adev->dev, "GECC is disabled\n");
1950 } else {
1951 					/* disable GECC in the next boot cycle if ras is
1952 					 * disabled by the module parameter amdgpu_ras_enable
1953 					 * and/or amdgpu_ras_mask, or if the boot_config_get
1954 					 * call failed
1955 */
1956 ret = psp_boot_config_set(adev, 0);
1957 if (ret)
1958 dev_warn(adev->dev, "PSP set boot config failed\n");
1959 else
1960 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1961 }
1962 }
1963 }
1964 }
1965
1966 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1967 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1968
1969 if (!psp->ras_context.context.mem_context.shared_buf) {
1970 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1971 if (ret)
1972 return ret;
1973 }
1974
1975 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1976 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1977
1978 if (amdgpu_ras_is_poison_mode_supported(adev))
1979 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1980 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1981 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1982 ras_cmd->ras_in_message.init_flags.xcc_mask =
1983 adev->gfx.xcc_mask;
1984 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1985 if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1986 ras_cmd->ras_in_message.init_flags.nps_mode =
1987 adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1988 ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
1989
1990 ret = psp_ta_load(psp, &psp->ras_context.context);
1991
1992 if (!ret && !ras_cmd->ras_status) {
1993 psp->ras_context.context.initialized = true;
1994 mutex_init(&psp->ras_context.mutex);
1995 } else {
1996 if (ras_cmd->ras_status)
1997 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1998
1999 /* fail to load RAS TA */
2000 psp->ras_context.context.initialized = false;
2001 }
2002
2003 return ret;
2004 }
2005
2006 int psp_ras_trigger_error(struct psp_context *psp,
2007 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
2008 {
2009 struct amdgpu_device *adev = psp->adev;
2010 int ret;
2011 uint32_t dev_mask;
2012 uint32_t ras_status = 0;
2013
2014 if (!psp->ras_context.context.initialized || !info)
2015 return -EINVAL;
2016
2017 switch (info->block_id) {
2018 case TA_RAS_BLOCK__GFX:
2019 dev_mask = GET_MASK(GC, instance_mask);
2020 break;
2021 case TA_RAS_BLOCK__SDMA:
2022 dev_mask = GET_MASK(SDMA0, instance_mask);
2023 break;
2024 case TA_RAS_BLOCK__VCN:
2025 case TA_RAS_BLOCK__JPEG:
2026 dev_mask = GET_MASK(VCN, instance_mask);
2027 break;
2028 default:
2029 dev_mask = instance_mask;
2030 break;
2031 }
2032
2033 /* reuse sub_block_index for backward compatibility */
2034 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
2035 dev_mask &= AMDGPU_RAS_INST_MASK;
2036 info->sub_block_index |= dev_mask;
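	/*
	 * Example: injecting into, say, GFX instances 0 and 1 gives an
	 * instance_mask of 0x3; GET_MASK() derives the per-block device mask
	 * from it, and the shift/mask above packs that into the
	 * AMDGPU_RAS_INST field of sub_block_index, so TA versions that
	 * ignore the instance bits still see the original sub_block_index.
	 */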
2037
2038 ret = psp_ras_send_cmd(psp,
2039 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
2040 if (ret)
2041 return -EINVAL;
2042
2043 	/* If err_event_athub occurs, the error injection was successful;
2044 	 * however, the return status from the TA is no longer reliable
2045 */
2046 if (amdgpu_ras_intr_triggered())
2047 return 0;
2048
2049 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
2050 return -EACCES;
2051 else if (ras_status)
2052 return -EINVAL;
2053
2054 return 0;
2055 }
2056
2057 int psp_ras_query_address(struct psp_context *psp,
2058 struct ta_ras_query_address_input *addr_in,
2059 struct ta_ras_query_address_output *addr_out)
2060 {
2061 int ret;
2062
2063 if (!psp->ras_context.context.initialized ||
2064 !addr_in || !addr_out)
2065 return -EINVAL;
2066
2067 ret = psp_ras_send_cmd(psp,
2068 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
2069
2070 return ret;
2071 }
2072 // ras end
2073
2074 // HDCP start
2075 static int psp_hdcp_initialize(struct psp_context *psp)
2076 {
2077 int ret;
2078
2079 /*
2080 * TODO: bypass the initialize in sriov for now
2081 */
2082 if (amdgpu_sriov_vf(psp->adev))
2083 return 0;
2084
2085 /* bypass hdcp initialization if dmu is harvested */
2086 if (!amdgpu_device_has_display_hardware(psp->adev))
2087 return 0;
2088
2089 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
2090 !psp->hdcp_context.context.bin_desc.start_addr) {
2091 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
2092 return 0;
2093 }
2094
2095 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
2096 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2097
2098 if (!psp->hdcp_context.context.mem_context.shared_buf) {
2099 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
2100 if (ret)
2101 return ret;
2102 }
2103
2104 ret = psp_ta_load(psp, &psp->hdcp_context.context);
2105 if (!ret) {
2106 psp->hdcp_context.context.initialized = true;
2107 mutex_init(&psp->hdcp_context.mutex);
2108 }
2109
2110 return ret;
2111 }
2112
2113 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2114 {
2115 /*
2116 * TODO: bypass the loading in sriov for now
2117 */
2118 if (amdgpu_sriov_vf(psp->adev))
2119 return 0;
2120
2121 if (!psp->hdcp_context.context.initialized)
2122 return 0;
2123
2124 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
2125 }
2126
2127 static int psp_hdcp_terminate(struct psp_context *psp)
2128 {
2129 int ret;
2130
2131 /*
2132 * TODO: bypass the terminate in sriov for now
2133 */
2134 if (amdgpu_sriov_vf(psp->adev))
2135 return 0;
2136
2137 if (!psp->hdcp_context.context.initialized)
2138 return 0;
2139
2140 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2141
2142 psp->hdcp_context.context.initialized = false;
2143
2144 return ret;
2145 }
2146 // HDCP end
2147
2148 // DTM start
2149 static int psp_dtm_initialize(struct psp_context *psp)
2150 {
2151 int ret;
2152
2153 /*
2154 * TODO: bypass the initialize in sriov for now
2155 */
2156 if (amdgpu_sriov_vf(psp->adev))
2157 return 0;
2158
2159 /* bypass dtm initialization if dmu is harvested */
2160 if (!amdgpu_device_has_display_hardware(psp->adev))
2161 return 0;
2162
2163 if (!psp->dtm_context.context.bin_desc.size_bytes ||
2164 !psp->dtm_context.context.bin_desc.start_addr) {
2165 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2166 return 0;
2167 }
2168
2169 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2170 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2171
2172 if (!psp->dtm_context.context.mem_context.shared_buf) {
2173 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2174 if (ret)
2175 return ret;
2176 }
2177
2178 ret = psp_ta_load(psp, &psp->dtm_context.context);
2179 if (!ret) {
2180 psp->dtm_context.context.initialized = true;
2181 mutex_init(&psp->dtm_context.mutex);
2182 }
2183
2184 return ret;
2185 }
2186
2187 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2188 {
2189 /*
2190 * TODO: bypass the loading in sriov for now
2191 */
2192 if (amdgpu_sriov_vf(psp->adev))
2193 return 0;
2194
2195 if (!psp->dtm_context.context.initialized)
2196 return 0;
2197
2198 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2199 }
2200
2201 static int psp_dtm_terminate(struct psp_context *psp)
2202 {
2203 int ret;
2204
2205 /*
2206 * TODO: bypass the terminate in sriov for now
2207 */
2208 if (amdgpu_sriov_vf(psp->adev))
2209 return 0;
2210
2211 if (!psp->dtm_context.context.initialized)
2212 return 0;
2213
2214 ret = psp_ta_unload(psp, &psp->dtm_context.context);
2215
2216 psp->dtm_context.context.initialized = false;
2217
2218 return ret;
2219 }
2220 // DTM end
2221
2222 // RAP start
2223 static int psp_rap_initialize(struct psp_context *psp)
2224 {
2225 int ret;
2226 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2227
2228 /*
2229 * TODO: bypass the initialize in sriov for now
2230 */
2231 if (amdgpu_sriov_vf(psp->adev))
2232 return 0;
2233
2234 if (!psp->rap_context.context.bin_desc.size_bytes ||
2235 !psp->rap_context.context.bin_desc.start_addr) {
2236 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2237 return 0;
2238 }
2239
2240 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2241 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2242
2243 if (!psp->rap_context.context.mem_context.shared_buf) {
2244 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2245 if (ret)
2246 return ret;
2247 }
2248
2249 ret = psp_ta_load(psp, &psp->rap_context.context);
2250 if (!ret) {
2251 psp->rap_context.context.initialized = true;
2252 mutex_init(&psp->rap_context.mutex);
2253 } else
2254 return ret;
2255
2256 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2257 if (ret || status != TA_RAP_STATUS__SUCCESS) {
2258 psp_rap_terminate(psp);
2259 /* free rap shared memory */
2260 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2261
2262 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2263 ret, status);
2264
2265 return ret;
2266 }
2267
2268 return 0;
2269 }
2270
2271 static int psp_rap_terminate(struct psp_context *psp)
2272 {
2273 int ret;
2274
2275 if (!psp->rap_context.context.initialized)
2276 return 0;
2277
2278 ret = psp_ta_unload(psp, &psp->rap_context.context);
2279
2280 psp->rap_context.context.initialized = false;
2281
2282 return ret;
2283 }
2284
2285 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2286 {
2287 struct ta_rap_shared_memory *rap_cmd;
2288 int ret = 0;
2289
2290 if (!psp->rap_context.context.initialized)
2291 return 0;
2292
2293 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2294 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2295 return -EINVAL;
2296
2297 mutex_lock(&psp->rap_context.mutex);
2298
2299 rap_cmd = (struct ta_rap_shared_memory *)
2300 psp->rap_context.context.mem_context.shared_buf;
2301 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2302
2303 rap_cmd->cmd_id = ta_cmd_id;
2304 rap_cmd->validation_method_id = METHOD_A;
2305
2306 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2307 if (ret)
2308 goto out_unlock;
2309
2310 if (status)
2311 *status = rap_cmd->rap_status;
2312
2313 out_unlock:
2314 mutex_unlock(&psp->rap_context.mutex);
2315
2316 return ret;
2317 }
2318 // RAP end
2319
2320 /* securedisplay start */
2321 static int psp_securedisplay_initialize(struct psp_context *psp)
2322 {
2323 int ret;
2324 struct ta_securedisplay_cmd *securedisplay_cmd;
2325
2326 /*
2327 * TODO: bypass the initialize in sriov for now
2328 */
2329 if (amdgpu_sriov_vf(psp->adev))
2330 return 0;
2331
2332 /* bypass securedisplay initialization if dmu is harvested */
2333 if (!amdgpu_device_has_display_hardware(psp->adev))
2334 return 0;
2335
2336 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2337 !psp->securedisplay_context.context.bin_desc.start_addr) {
2338 dev_info(psp->adev->dev,
2339 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
2340 return 0;
2341 }
2342
2343 psp->securedisplay_context.context.mem_context.shared_mem_size =
2344 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2345 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2346
2347 if (!psp->securedisplay_context.context.initialized) {
2348 ret = psp_ta_init_shared_buf(psp,
2349 &psp->securedisplay_context.context.mem_context);
2350 if (ret)
2351 return ret;
2352 }
2353
2354 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2355 if (!ret && !psp->securedisplay_context.context.resp_status) {
2356 psp->securedisplay_context.context.initialized = true;
2357 mutex_init(&psp->securedisplay_context.mutex);
2358 } else
2359 return ret;
2360
2361 mutex_lock(&psp->securedisplay_context.mutex);
2362
2363 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2364 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2365
2366 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2367
2368 mutex_unlock(&psp->securedisplay_context.mutex);
2369
2370 if (ret) {
2371 psp_securedisplay_terminate(psp);
2372 /* free securedisplay shared memory */
2373 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2374 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2375 return -EINVAL;
2376 }
2377
2378 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2379 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2380 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2381 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2382 /* don't try again */
2383 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2384 }
2385
2386 return 0;
2387 }
2388
2389 static int psp_securedisplay_terminate(struct psp_context *psp)
2390 {
2391 int ret;
2392
2393 /*
2394 	 * TODO: bypass the terminate in sriov for now
2395 */
2396 if (amdgpu_sriov_vf(psp->adev))
2397 return 0;
2398
2399 if (!psp->securedisplay_context.context.initialized)
2400 return 0;
2401
2402 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2403
2404 psp->securedisplay_context.context.initialized = false;
2405
2406 return ret;
2407 }
2408
2409 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2410 {
2411 int ret;
2412
2413 if (!psp->securedisplay_context.context.initialized)
2414 return -EINVAL;
2415
2416 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2417 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2418 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2419 return -EINVAL;
2420
2421 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2422
2423 return ret;
2424 }
2425 /* SECUREDISPLAY end */
2426
2427 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2428 {
2429 struct psp_context *psp = &adev->psp;
2430 int ret = 0;
2431
2432 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2433 ret = psp->funcs->wait_for_bootloader(psp);
2434
2435 return ret;
2436 }
2437
2438 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2439 {
2440 if (psp->funcs &&
2441 psp->funcs->get_ras_capability) {
2442 return psp->funcs->get_ras_capability(psp);
2443 } else {
2444 return false;
2445 }
2446 }
2447
2448 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2449 {
2450 struct psp_context *psp = &adev->psp;
2451
2452 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2453 return false;
2454
2455 if (psp->funcs && psp->funcs->is_reload_needed)
2456 return psp->funcs->is_reload_needed(psp);
2457
2458 return false;
2459 }
2460
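/*
 * Refresh the MC addresses PSP uses for its command, fence, firmware and
 * ring buffers from their current framebuffer aperture addresses; used on
 * the VF XGMI migration path, presumably because the backing memory may
 * have moved.
 */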
2461 static void psp_update_gpu_addresses(struct amdgpu_device *adev)
2462 {
2463 struct psp_context *psp = &adev->psp;
2464
2465 if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
2466 psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
2467 psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
2468 psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
2469 }
2470 if (adev->firmware.rbuf && psp->km_ring.ring_mem)
2471 psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
2472 }
2473
2474 static int psp_hw_start(struct psp_context *psp)
2475 {
2476 struct amdgpu_device *adev = psp->adev;
2477 int ret;
2478
2479 if (amdgpu_virt_xgmi_migrate_enabled(adev))
2480 psp_update_gpu_addresses(adev);
2481
2482 if (!amdgpu_sriov_vf(adev)) {
2483 if ((is_psp_fw_valid(psp->kdb)) &&
2484 (psp->funcs->bootloader_load_kdb != NULL)) {
2485 ret = psp_bootloader_load_kdb(psp);
2486 if (ret) {
2487 dev_err(adev->dev, "PSP load kdb failed!\n");
2488 return ret;
2489 }
2490 }
2491
2492 if ((is_psp_fw_valid(psp->spl)) &&
2493 (psp->funcs->bootloader_load_spl != NULL)) {
2494 ret = psp_bootloader_load_spl(psp);
2495 if (ret) {
2496 dev_err(adev->dev, "PSP load spl failed!\n");
2497 return ret;
2498 }
2499 }
2500
2501 if ((is_psp_fw_valid(psp->sys)) &&
2502 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2503 ret = psp_bootloader_load_sysdrv(psp);
2504 if (ret) {
2505 dev_err(adev->dev, "PSP load sys drv failed!\n");
2506 return ret;
2507 }
2508 }
2509
2510 if ((is_psp_fw_valid(psp->soc_drv)) &&
2511 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2512 ret = psp_bootloader_load_soc_drv(psp);
2513 if (ret) {
2514 dev_err(adev->dev, "PSP load soc drv failed!\n");
2515 return ret;
2516 }
2517 }
2518
2519 if ((is_psp_fw_valid(psp->intf_drv)) &&
2520 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2521 ret = psp_bootloader_load_intf_drv(psp);
2522 if (ret) {
2523 dev_err(adev->dev, "PSP load intf drv failed!\n");
2524 return ret;
2525 }
2526 }
2527
2528 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2529 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2530 ret = psp_bootloader_load_dbg_drv(psp);
2531 if (ret) {
2532 dev_err(adev->dev, "PSP load dbg drv failed!\n");
2533 return ret;
2534 }
2535 }
2536
2537 if ((is_psp_fw_valid(psp->ras_drv)) &&
2538 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2539 ret = psp_bootloader_load_ras_drv(psp);
2540 if (ret) {
2541 dev_err(adev->dev, "PSP load ras_drv failed!\n");
2542 return ret;
2543 }
2544 }
2545
2546 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2547 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2548 ret = psp_bootloader_load_ipkeymgr_drv(psp);
2549 if (ret) {
2550 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2551 return ret;
2552 }
2553 }
2554
2555 if ((is_psp_fw_valid(psp->spdm_drv)) &&
2556 (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2557 ret = psp_bootloader_load_spdm_drv(psp);
2558 if (ret) {
2559 dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2560 return ret;
2561 }
2562 }
2563
2564 if ((is_psp_fw_valid(psp->sos)) &&
2565 (psp->funcs->bootloader_load_sos != NULL)) {
2566 ret = psp_bootloader_load_sos(psp);
2567 if (ret) {
2568 dev_err(adev->dev, "PSP load sos failed!\n");
2569 return ret;
2570 }
2571 }
2572 }
2573
2574 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2575 if (ret) {
2576 dev_err(adev->dev, "PSP create ring failed!\n");
2577 return ret;
2578 }
2579
2580 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2581 ret = psp_update_fw_reservation(psp);
2582 if (ret) {
2583 dev_err(adev->dev, "update fw reservation failed!\n");
2584 return ret;
2585 }
2586 }
2587
2588 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2589 goto skip_pin_bo;
2590
2591 if (!psp->boot_time_tmr || psp->autoload_supported) {
2592 ret = psp_tmr_init(psp);
2593 if (ret) {
2594 dev_err(adev->dev, "PSP tmr init failed!\n");
2595 return ret;
2596 }
2597 }
2598
2599 skip_pin_bo:
2600 /*
2601 * For ASICs with DF Cstate management centralized
2602 	 * to PMFW, TMR setup should be performed after the PMFW
2603 	 * has been loaded and before other non-psp firmware is loaded.
2604 */
2605 if (psp->pmfw_centralized_cstate_management) {
2606 ret = psp_load_smu_fw(psp);
2607 if (ret)
2608 return ret;
2609 }
2610
2611 if (!psp->boot_time_tmr || !psp->autoload_supported) {
2612 ret = psp_tmr_load(psp);
2613 if (ret) {
2614 dev_err(adev->dev, "PSP load tmr failed!\n");
2615 return ret;
2616 }
2617 }
2618
2619 return 0;
2620 }
2621
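/*
 * Translate an AMDGPU_UCODE_ID_* value into the GFX_FW_TYPE_* identifier
 * the PSP expects in the LOAD_IP_FW command; unknown ids are rejected
 * with -EINVAL.
 */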
2622 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2623 enum psp_gfx_fw_type *type)
2624 {
2625 switch (ucode->ucode_id) {
2626 case AMDGPU_UCODE_ID_CAP:
2627 *type = GFX_FW_TYPE_CAP;
2628 break;
2629 case AMDGPU_UCODE_ID_SDMA0:
2630 *type = GFX_FW_TYPE_SDMA0;
2631 break;
2632 case AMDGPU_UCODE_ID_SDMA1:
2633 *type = GFX_FW_TYPE_SDMA1;
2634 break;
2635 case AMDGPU_UCODE_ID_SDMA2:
2636 *type = GFX_FW_TYPE_SDMA2;
2637 break;
2638 case AMDGPU_UCODE_ID_SDMA3:
2639 *type = GFX_FW_TYPE_SDMA3;
2640 break;
2641 case AMDGPU_UCODE_ID_SDMA4:
2642 *type = GFX_FW_TYPE_SDMA4;
2643 break;
2644 case AMDGPU_UCODE_ID_SDMA5:
2645 *type = GFX_FW_TYPE_SDMA5;
2646 break;
2647 case AMDGPU_UCODE_ID_SDMA6:
2648 *type = GFX_FW_TYPE_SDMA6;
2649 break;
2650 case AMDGPU_UCODE_ID_SDMA7:
2651 *type = GFX_FW_TYPE_SDMA7;
2652 break;
2653 case AMDGPU_UCODE_ID_CP_MES:
2654 *type = GFX_FW_TYPE_CP_MES;
2655 break;
2656 case AMDGPU_UCODE_ID_CP_MES_DATA:
2657 *type = GFX_FW_TYPE_MES_STACK;
2658 break;
2659 case AMDGPU_UCODE_ID_CP_MES1:
2660 *type = GFX_FW_TYPE_CP_MES_KIQ;
2661 break;
2662 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2663 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2664 break;
2665 case AMDGPU_UCODE_ID_CP_CE:
2666 *type = GFX_FW_TYPE_CP_CE;
2667 break;
2668 case AMDGPU_UCODE_ID_CP_PFP:
2669 *type = GFX_FW_TYPE_CP_PFP;
2670 break;
2671 case AMDGPU_UCODE_ID_CP_ME:
2672 *type = GFX_FW_TYPE_CP_ME;
2673 break;
2674 case AMDGPU_UCODE_ID_CP_MEC1:
2675 *type = GFX_FW_TYPE_CP_MEC;
2676 break;
2677 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2678 *type = GFX_FW_TYPE_CP_MEC_ME1;
2679 break;
2680 case AMDGPU_UCODE_ID_CP_MEC2:
2681 *type = GFX_FW_TYPE_CP_MEC;
2682 break;
2683 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2684 *type = GFX_FW_TYPE_CP_MEC_ME2;
2685 break;
2686 case AMDGPU_UCODE_ID_RLC_P:
2687 *type = GFX_FW_TYPE_RLC_P;
2688 break;
2689 case AMDGPU_UCODE_ID_RLC_V:
2690 *type = GFX_FW_TYPE_RLC_V;
2691 break;
2692 case AMDGPU_UCODE_ID_RLC_G:
2693 *type = GFX_FW_TYPE_RLC_G;
2694 break;
2695 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2696 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2697 break;
2698 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2699 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2700 break;
2701 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2702 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2703 break;
2704 case AMDGPU_UCODE_ID_RLC_IRAM:
2705 *type = GFX_FW_TYPE_RLC_IRAM;
2706 break;
2707 case AMDGPU_UCODE_ID_RLC_DRAM:
2708 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2709 break;
2710 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2711 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2712 break;
2713 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2714 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2715 break;
2716 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2717 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2718 break;
2719 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2720 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2721 break;
2722 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2723 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2724 break;
2725 case AMDGPU_UCODE_ID_SMC:
2726 *type = GFX_FW_TYPE_SMU;
2727 break;
2728 case AMDGPU_UCODE_ID_PPTABLE:
2729 *type = GFX_FW_TYPE_PPTABLE;
2730 break;
2731 case AMDGPU_UCODE_ID_UVD:
2732 *type = GFX_FW_TYPE_UVD;
2733 break;
2734 case AMDGPU_UCODE_ID_UVD1:
2735 *type = GFX_FW_TYPE_UVD1;
2736 break;
2737 case AMDGPU_UCODE_ID_VCE:
2738 *type = GFX_FW_TYPE_VCE;
2739 break;
2740 case AMDGPU_UCODE_ID_VCN:
2741 *type = GFX_FW_TYPE_VCN;
2742 break;
2743 case AMDGPU_UCODE_ID_VCN1:
2744 *type = GFX_FW_TYPE_VCN1;
2745 break;
2746 case AMDGPU_UCODE_ID_DMCU_ERAM:
2747 *type = GFX_FW_TYPE_DMCU_ERAM;
2748 break;
2749 case AMDGPU_UCODE_ID_DMCU_INTV:
2750 *type = GFX_FW_TYPE_DMCU_ISR;
2751 break;
2752 case AMDGPU_UCODE_ID_VCN0_RAM:
2753 *type = GFX_FW_TYPE_VCN0_RAM;
2754 break;
2755 case AMDGPU_UCODE_ID_VCN1_RAM:
2756 *type = GFX_FW_TYPE_VCN1_RAM;
2757 break;
2758 case AMDGPU_UCODE_ID_DMCUB:
2759 *type = GFX_FW_TYPE_DMUB;
2760 break;
2761 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2762 case AMDGPU_UCODE_ID_SDMA_RS64:
2763 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2764 break;
2765 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2766 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2767 break;
2768 case AMDGPU_UCODE_ID_IMU_I:
2769 *type = GFX_FW_TYPE_IMU_I;
2770 break;
2771 case AMDGPU_UCODE_ID_IMU_D:
2772 *type = GFX_FW_TYPE_IMU_D;
2773 break;
2774 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2775 *type = GFX_FW_TYPE_RS64_PFP;
2776 break;
2777 case AMDGPU_UCODE_ID_CP_RS64_ME:
2778 *type = GFX_FW_TYPE_RS64_ME;
2779 break;
2780 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2781 *type = GFX_FW_TYPE_RS64_MEC;
2782 break;
2783 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2784 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2785 break;
2786 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2787 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2788 break;
2789 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2790 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2791 break;
2792 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2793 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2794 break;
2795 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2796 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2797 break;
2798 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2799 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2800 break;
2801 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2802 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2803 break;
2804 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2805 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2806 break;
2807 case AMDGPU_UCODE_ID_VPE_CTX:
2808 *type = GFX_FW_TYPE_VPEC_FW1;
2809 break;
2810 case AMDGPU_UCODE_ID_VPE_CTL:
2811 *type = GFX_FW_TYPE_VPEC_FW2;
2812 break;
2813 case AMDGPU_UCODE_ID_VPE:
2814 *type = GFX_FW_TYPE_VPE;
2815 break;
2816 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2817 *type = GFX_FW_TYPE_UMSCH_UCODE;
2818 break;
2819 case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2820 *type = GFX_FW_TYPE_UMSCH_DATA;
2821 break;
2822 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2823 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2824 break;
2825 case AMDGPU_UCODE_ID_P2S_TABLE:
2826 *type = GFX_FW_TYPE_P2S_TABLE;
2827 break;
2828 case AMDGPU_UCODE_ID_JPEG_RAM:
2829 *type = GFX_FW_TYPE_JPEG_RAM;
2830 break;
2831 case AMDGPU_UCODE_ID_ISP:
2832 *type = GFX_FW_TYPE_ISP;
2833 break;
2834 case AMDGPU_UCODE_ID_MAXIMUM:
2835 default:
2836 return -EINVAL;
2837 }
2838
2839 return 0;
2840 }
2841
2842 static void psp_print_fw_hdr(struct psp_context *psp,
2843 struct amdgpu_firmware_info *ucode)
2844 {
2845 struct amdgpu_device *adev = psp->adev;
2846 struct common_firmware_header *hdr;
2847
2848 switch (ucode->ucode_id) {
2849 case AMDGPU_UCODE_ID_SDMA0:
2850 case AMDGPU_UCODE_ID_SDMA1:
2851 case AMDGPU_UCODE_ID_SDMA2:
2852 case AMDGPU_UCODE_ID_SDMA3:
2853 case AMDGPU_UCODE_ID_SDMA4:
2854 case AMDGPU_UCODE_ID_SDMA5:
2855 case AMDGPU_UCODE_ID_SDMA6:
2856 case AMDGPU_UCODE_ID_SDMA7:
2857 hdr = (struct common_firmware_header *)
2858 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2859 amdgpu_ucode_print_sdma_hdr(hdr);
2860 break;
2861 case AMDGPU_UCODE_ID_CP_CE:
2862 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2863 amdgpu_ucode_print_gfx_hdr(hdr);
2864 break;
2865 case AMDGPU_UCODE_ID_CP_PFP:
2866 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2867 amdgpu_ucode_print_gfx_hdr(hdr);
2868 break;
2869 case AMDGPU_UCODE_ID_CP_ME:
2870 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2871 amdgpu_ucode_print_gfx_hdr(hdr);
2872 break;
2873 case AMDGPU_UCODE_ID_CP_MEC1:
2874 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2875 amdgpu_ucode_print_gfx_hdr(hdr);
2876 break;
2877 case AMDGPU_UCODE_ID_RLC_G:
2878 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2879 amdgpu_ucode_print_rlc_hdr(hdr);
2880 break;
2881 case AMDGPU_UCODE_ID_SMC:
2882 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2883 amdgpu_ucode_print_smc_hdr(hdr);
2884 break;
2885 default:
2886 break;
2887 }
2888 }
2889
2890 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2891 struct amdgpu_firmware_info *ucode,
2892 struct psp_gfx_cmd_resp *cmd)
2893 {
2894 int ret;
2895 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2896
2897 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2898 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2899 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2900 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2901
2902 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2903 if (ret)
2904 dev_err(psp->adev->dev, "Unknown firmware type\n");
2905
2906 return ret;
2907 }
2908
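/*
 * Load a single non-PSP firmware image through the PSP: build a
 * GFX_CMD_ID_LOAD_IP_FW command for @ucode and hand it to
 * psp_cmd_submit_buf() together with the fence buffer address.
 */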
2909 int psp_execute_ip_fw_load(struct psp_context *psp,
2910 struct amdgpu_firmware_info *ucode)
2911 {
2912 int ret = 0;
2913 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2914
2915 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2916 if (!ret) {
2917 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2918 psp->fence_buf_mc_addr);
2919 }
2920
2921 release_psp_cmd_buf(psp);
2922
2923 return ret;
2924 }
2925
2926 static int psp_load_p2s_table(struct psp_context *psp)
2927 {
2928 int ret;
2929 struct amdgpu_device *adev = psp->adev;
2930 struct amdgpu_firmware_info *ucode =
2931 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2932
2933 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2934 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2935 return 0;
2936
2937 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2938 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2939 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2940 0x0036003C;
2941 if (psp->sos.fw_version < supp_vers)
2942 return 0;
2943 }
2944
2945 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2946 return 0;
2947
2948 ret = psp_execute_ip_fw_load(psp, ucode);
2949
2950 return ret;
2951 }
2952
2953 static int psp_load_smu_fw(struct psp_context *psp)
2954 {
2955 int ret;
2956 struct amdgpu_device *adev = psp->adev;
2957 struct amdgpu_firmware_info *ucode =
2958 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2959 struct amdgpu_ras *ras = psp->ras_context.ras;
2960
2961 /*
2962 	 * Skip SMU FW reloading when BACO is used for runpm only,
2963 	 * as the SMU is always alive in that case.
2964 */
2965 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2966 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2967 return 0;
2968
2969 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2970 return 0;
2971
2972 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2973 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2974 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2975 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2976 if (ret)
2977 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2978 }
2979
2980 ret = psp_execute_ip_fw_load(psp, ucode);
2981
2982 if (ret)
2983 dev_err(adev->dev, "PSP load smu failed!\n");
2984
2985 return ret;
2986 }
2987
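/*
 * Decide whether a ucode entry should be skipped by the generic loader:
 * empty images, the P2S table (loaded separately), SMC when the SMU is
 * loaded elsewhere (reload quirk, autoload or PMFW-centralized cstate
 * management), VF-skipped images under SRIOV, and MEC JT images when
 * autoload is enabled.
 */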
2988 static bool fw_load_skip_check(struct psp_context *psp,
2989 struct amdgpu_firmware_info *ucode)
2990 {
2991 if (!ucode->fw || !ucode->ucode_size)
2992 return true;
2993
2994 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2995 return true;
2996
2997 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2998 (psp_smu_reload_quirk(psp) ||
2999 psp->autoload_supported ||
3000 psp->pmfw_centralized_cstate_management))
3001 return true;
3002
3003 if (amdgpu_sriov_vf(psp->adev) &&
3004 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
3005 return true;
3006
3007 if (psp->autoload_supported &&
3008 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
3009 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
3010 /* skip mec JT when autoload is enabled */
3011 return true;
3012
3013 return false;
3014 }
3015
3016 int psp_load_fw_list(struct psp_context *psp,
3017 struct amdgpu_firmware_info **ucode_list, int ucode_count)
3018 {
3019 int ret = 0, i;
3020 struct amdgpu_firmware_info *ucode;
3021
3022 for (i = 0; i < ucode_count; ++i) {
3023 ucode = ucode_list[i];
3024 psp_print_fw_hdr(psp, ucode);
3025 ret = psp_execute_ip_fw_load(psp, ucode);
3026 if (ret)
3027 return ret;
3028 }
3029 return ret;
3030 }
3031
3032 static int psp_load_non_psp_fw(struct psp_context *psp)
3033 {
3034 int i, ret;
3035 struct amdgpu_firmware_info *ucode;
3036 struct amdgpu_device *adev = psp->adev;
3037
3038 if (psp->autoload_supported &&
3039 !psp->pmfw_centralized_cstate_management) {
3040 ret = psp_load_smu_fw(psp);
3041 if (ret)
3042 return ret;
3043 }
3044
3045 /* Load P2S table first if it's available */
3046 psp_load_p2s_table(psp);
3047
3048 for (i = 0; i < adev->firmware.max_ucodes; i++) {
3049 ucode = &adev->firmware.ucode[i];
3050
3051 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
3052 !fw_load_skip_check(psp, ucode)) {
3053 ret = psp_load_smu_fw(psp);
3054 if (ret)
3055 return ret;
3056 continue;
3057 }
3058
3059 if (fw_load_skip_check(psp, ucode))
3060 continue;
3061
3062 if (psp->autoload_supported &&
3063 (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3064 IP_VERSION(11, 0, 7) ||
3065 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3066 IP_VERSION(11, 0, 11) ||
3067 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3068 IP_VERSION(11, 0, 12)) &&
3069 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
3070 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
3071 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
3072 			/* PSP only receives one SDMA fw for sienna_cichlid,
3073 			 * as all four sdma fw images are the same
3074 */
3075 continue;
3076
3077 psp_print_fw_hdr(psp, ucode);
3078
3079 ret = psp_execute_ip_fw_load(psp, ucode);
3080 if (ret)
3081 return ret;
3082
3083 /* Start rlc autoload after psp received all the gfx firmware */
3084 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
3085 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
3086 ret = psp_rlc_autoload_start(psp);
3087 if (ret) {
3088 dev_err(adev->dev, "Failed to start rlc autoload\n");
3089 return ret;
3090 }
3091 }
3092 }
3093
3094 return 0;
3095 }
3096
3097 static int psp_load_fw(struct amdgpu_device *adev)
3098 {
3099 int ret;
3100 struct psp_context *psp = &adev->psp;
3101
3102 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3103 /* should not destroy ring, only stop */
3104 psp_ring_stop(psp, PSP_RING_TYPE__KM);
3105 } else {
3106 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
3107
3108 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
3109 if (ret) {
3110 dev_err(adev->dev, "PSP ring init failed!\n");
3111 goto failed;
3112 }
3113 }
3114
3115 ret = psp_hw_start(psp);
3116 if (ret)
3117 goto failed;
3118
3119 ret = psp_load_non_psp_fw(psp);
3120 if (ret)
3121 goto failed1;
3122
3123 ret = psp_asd_initialize(psp);
3124 if (ret) {
3125 dev_err(adev->dev, "PSP load asd failed!\n");
3126 goto failed1;
3127 }
3128
3129 ret = psp_rl_load(adev);
3130 if (ret) {
3131 dev_err(adev->dev, "PSP load RL failed!\n");
3132 goto failed1;
3133 }
3134
3135 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
3136 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3137 ret = psp_xgmi_initialize(psp, false, true);
3138 			/* Warn on XGMI session initialization failure
3139 			 * instead of stopping driver initialization
3140 */
3141 if (ret)
3142 dev_err(psp->adev->dev,
3143 "XGMI: Failed to initialize XGMI session\n");
3144 }
3145 }
3146
3147 if (psp->ta_fw) {
3148 ret = psp_ras_initialize(psp);
3149 if (ret)
3150 dev_err(psp->adev->dev,
3151 "RAS: Failed to initialize RAS\n");
3152
3153 ret = psp_hdcp_initialize(psp);
3154 if (ret)
3155 dev_err(psp->adev->dev,
3156 "HDCP: Failed to initialize HDCP\n");
3157
3158 ret = psp_dtm_initialize(psp);
3159 if (ret)
3160 dev_err(psp->adev->dev,
3161 "DTM: Failed to initialize DTM\n");
3162
3163 ret = psp_rap_initialize(psp);
3164 if (ret)
3165 dev_err(psp->adev->dev,
3166 "RAP: Failed to initialize RAP\n");
3167
3168 ret = psp_securedisplay_initialize(psp);
3169 if (ret)
3170 dev_err(psp->adev->dev,
3171 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3172 }
3173
3174 return 0;
3175
3176 failed1:
3177 psp_free_shared_bufs(psp);
3178 failed:
3179 /*
3180 * all cleanup jobs (xgmi terminate, ras terminate,
3181 	 * ring destroy, cmd/fence/fw buffers destroy,
3182 	 * psp->cmd destroy) are delayed to psp_hw_fini
3183 */
3184 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3185 return ret;
3186 }
3187
3188 static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3189 {
3190 int ret;
3191 struct amdgpu_device *adev = ip_block->adev;
3192
3193 mutex_lock(&adev->firmware.mutex);
3194
3195 ret = amdgpu_ucode_init_bo(adev);
3196 if (ret)
3197 goto failed;
3198
3199 ret = psp_load_fw(adev);
3200 if (ret) {
3201 dev_err(adev->dev, "PSP firmware loading failed\n");
3202 goto failed;
3203 }
3204
3205 mutex_unlock(&adev->firmware.mutex);
3206 return 0;
3207
3208 failed:
3209 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3210 mutex_unlock(&adev->firmware.mutex);
3211 return -EINVAL;
3212 }
3213
3214 static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3215 {
3216 struct amdgpu_device *adev = ip_block->adev;
3217 struct psp_context *psp = &adev->psp;
3218
3219 if (psp->ta_fw) {
3220 psp_ras_terminate(psp);
3221 psp_securedisplay_terminate(psp);
3222 psp_rap_terminate(psp);
3223 psp_dtm_terminate(psp);
3224 psp_hdcp_terminate(psp);
3225
3226 if (adev->gmc.xgmi.num_physical_nodes > 1)
3227 psp_xgmi_terminate(psp);
3228 }
3229
3230 psp_asd_terminate(psp);
3231 psp_tmr_terminate(psp);
3232
3233 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3234
3235 return 0;
3236 }
3237
3238 static int psp_suspend(struct amdgpu_ip_block *ip_block)
3239 {
3240 int ret = 0;
3241 struct amdgpu_device *adev = ip_block->adev;
3242 struct psp_context *psp = &adev->psp;
3243
3244 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3245 psp->xgmi_context.context.initialized) {
3246 ret = psp_xgmi_terminate(psp);
3247 if (ret) {
3248 dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3249 goto out;
3250 }
3251 }
3252
3253 if (psp->ta_fw) {
3254 ret = psp_ras_terminate(psp);
3255 if (ret) {
3256 dev_err(adev->dev, "Failed to terminate ras ta\n");
3257 goto out;
3258 }
3259 ret = psp_hdcp_terminate(psp);
3260 if (ret) {
3261 dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3262 goto out;
3263 }
3264 ret = psp_dtm_terminate(psp);
3265 if (ret) {
3266 dev_err(adev->dev, "Failed to terminate dtm ta\n");
3267 goto out;
3268 }
3269 ret = psp_rap_terminate(psp);
3270 if (ret) {
3271 dev_err(adev->dev, "Failed to terminate rap ta\n");
3272 goto out;
3273 }
3274 ret = psp_securedisplay_terminate(psp);
3275 if (ret) {
3276 dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3277 goto out;
3278 }
3279 }
3280
3281 ret = psp_asd_terminate(psp);
3282 if (ret) {
3283 dev_err(adev->dev, "Failed to terminate asd\n");
3284 goto out;
3285 }
3286
3287 ret = psp_tmr_terminate(psp);
3288 if (ret) {
3289 dev_err(adev->dev, "Failed to terminate tmr\n");
3290 goto out;
3291 }
3292
3293 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3294 if (ret)
3295 dev_err(adev->dev, "PSP ring stop failed\n");
3296
3297 out:
3298 return ret;
3299 }
3300
3301 static int psp_resume(struct amdgpu_ip_block *ip_block)
3302 {
3303 int ret;
3304 struct amdgpu_device *adev = ip_block->adev;
3305 struct psp_context *psp = &adev->psp;
3306
3307 dev_info(adev->dev, "PSP is resuming...\n");
3308
3309 if (psp->mem_train_ctx.enable_mem_training) {
3310 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3311 if (ret) {
3312 dev_err(adev->dev, "Failed to process memory training!\n");
3313 return ret;
3314 }
3315 }
3316
3317 mutex_lock(&adev->firmware.mutex);
3318
3319 ret = amdgpu_ucode_init_bo(adev);
3320 if (ret)
3321 goto failed;
3322
3323 ret = psp_hw_start(psp);
3324 if (ret)
3325 goto failed;
3326
3327 ret = psp_load_non_psp_fw(psp);
3328 if (ret)
3329 goto failed;
3330
3331 ret = psp_asd_initialize(psp);
3332 if (ret) {
3333 dev_err(adev->dev, "PSP load asd failed!\n");
3334 goto failed;
3335 }
3336
3337 ret = psp_rl_load(adev);
3338 if (ret) {
3339 dev_err(adev->dev, "PSP load RL failed!\n");
3340 goto failed;
3341 }
3342
3343 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3344 ret = psp_xgmi_initialize(psp, false, true);
3345 		/* Warn on XGMI session initialization failure
3346 		 * instead of stopping driver initialization
3347 */
3348 if (ret)
3349 dev_err(psp->adev->dev,
3350 "XGMI: Failed to initialize XGMI session\n");
3351 }
3352
3353 if (psp->ta_fw) {
3354 ret = psp_ras_initialize(psp);
3355 if (ret)
3356 dev_err(psp->adev->dev,
3357 "RAS: Failed to initialize RAS\n");
3358
3359 ret = psp_hdcp_initialize(psp);
3360 if (ret)
3361 dev_err(psp->adev->dev,
3362 "HDCP: Failed to initialize HDCP\n");
3363
3364 ret = psp_dtm_initialize(psp);
3365 if (ret)
3366 dev_err(psp->adev->dev,
3367 "DTM: Failed to initialize DTM\n");
3368
3369 ret = psp_rap_initialize(psp);
3370 if (ret)
3371 dev_err(psp->adev->dev,
3372 "RAP: Failed to initialize RAP\n");
3373
3374 ret = psp_securedisplay_initialize(psp);
3375 if (ret)
3376 dev_err(psp->adev->dev,
3377 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3378 }
3379
3380 mutex_unlock(&adev->firmware.mutex);
3381
3382 return 0;
3383
3384 failed:
3385 dev_err(adev->dev, "PSP resume failed\n");
3386 mutex_unlock(&adev->firmware.mutex);
3387 return ret;
3388 }
3389
3390 int psp_gpu_reset(struct amdgpu_device *adev)
3391 {
3392 int ret;
3393
3394 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3395 return 0;
3396
3397 mutex_lock(&adev->psp.mutex);
3398 ret = psp_mode1_reset(&adev->psp);
3399 mutex_unlock(&adev->psp.mutex);
3400
3401 return ret;
3402 }
3403
3404 int psp_rlc_autoload_start(struct psp_context *psp)
3405 {
3406 int ret;
3407 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3408
3409 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3410
3411 ret = psp_cmd_submit_buf(psp, NULL, cmd,
3412 psp->fence_buf_mc_addr);
3413
3414 release_psp_cmd_buf(psp);
3415
3416 return ret;
3417 }
3418
3419 int psp_ring_cmd_submit(struct psp_context *psp,
3420 uint64_t cmd_buf_mc_addr,
3421 uint64_t fence_mc_addr,
3422 int index)
3423 {
3424 unsigned int psp_write_ptr_reg = 0;
3425 struct psp_gfx_rb_frame *write_frame;
3426 struct psp_ring *ring = &psp->km_ring;
3427 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3428 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3429 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3430 struct amdgpu_device *adev = psp->adev;
3431 uint32_t ring_size_dw = ring->ring_size / 4;
3432 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3433
3434 /* KM (GPCOM) prepare write pointer */
3435 psp_write_ptr_reg = psp_ring_get_wptr(psp);
3436
3437 /* Update KM RB frame pointer to new frame */
3438 /* write_frame ptr increments by size of rb_frame in bytes */
3439 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3440 if ((psp_write_ptr_reg % ring_size_dw) == 0)
3441 write_frame = ring_buffer_start;
3442 else
3443 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
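	/*
	 * Example of the index math above: assuming a 4KB ring (1024 DWORDs),
	 * each frame occupies rb_frame_size_dw DWORDs, so a wptr of
	 * N * rb_frame_size_dw selects frame slot N; once the wptr reaches a
	 * multiple of ring_size_dw it wraps back to the first slot.
	 */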
3444 /* Check invalid write_frame ptr address */
3445 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3446 dev_err(adev->dev,
3447 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3448 ring_buffer_start, ring_buffer_end, write_frame);
3449 dev_err(adev->dev,
3450 "write_frame is pointing to address out of bounds\n");
3451 return -EINVAL;
3452 }
3453
3454 /* Initialize KM RB frame */
3455 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3456
3457 /* Update KM RB frame */
3458 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3459 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3460 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3461 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3462 write_frame->fence_value = index;
3463 amdgpu_device_flush_hdp(adev, NULL);
3464
3465 /* Update the write Pointer in DWORDs */
3466 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3467 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3468 return 0;
3469 }
3470
3471 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3472 {
3473 struct amdgpu_device *adev = psp->adev;
3474 const struct psp_firmware_header_v1_0 *asd_hdr;
3475 int err = 0;
3476
3477 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3478 "amdgpu/%s_asd.bin", chip_name);
3479 if (err)
3480 goto out;
3481
3482 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3483 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3484 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3485 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3486 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3487 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3488 return 0;
3489 out:
3490 amdgpu_ucode_release(&adev->psp.asd_fw);
3491 return err;
3492 }
3493
3494 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3495 {
3496 struct amdgpu_device *adev = psp->adev;
3497 const struct psp_firmware_header_v1_0 *toc_hdr;
3498 int err = 0;
3499
3500 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3501 "amdgpu/%s_toc.bin", chip_name);
3502 if (err)
3503 goto out;
3504
3505 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3506 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3507 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3508 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3509 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3510 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3511 return 0;
3512 out:
3513 amdgpu_ucode_release(&adev->psp.toc_fw);
3514 return err;
3515 }
3516
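/*
 * Fill in the matching psp_context bin_desc entry (sos, sys, kdb, toc, spl,
 * rl, the various drivers, ...) from one descriptor of a v2.0 SOS firmware
 * header; the start address is computed relative to the header's ucode
 * array offset.
 */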
3517 static int parse_sos_bin_descriptor(struct psp_context *psp,
3518 const struct psp_fw_bin_desc *desc,
3519 const struct psp_firmware_header_v2_0 *sos_hdr)
3520 {
3521 uint8_t *ucode_start_addr = NULL;
3522
3523 if (!psp || !desc || !sos_hdr)
3524 return -EINVAL;
3525
3526 ucode_start_addr = (uint8_t *)sos_hdr +
3527 le32_to_cpu(desc->offset_bytes) +
3528 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3529
3530 switch (desc->fw_type) {
3531 case PSP_FW_TYPE_PSP_SOS:
3532 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3533 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3534 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3535 psp->sos.start_addr = ucode_start_addr;
3536 break;
3537 case PSP_FW_TYPE_PSP_SYS_DRV:
3538 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3539 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3540 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3541 psp->sys.start_addr = ucode_start_addr;
3542 break;
3543 case PSP_FW_TYPE_PSP_KDB:
3544 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3545 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3546 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3547 psp->kdb.start_addr = ucode_start_addr;
3548 break;
3549 case PSP_FW_TYPE_PSP_TOC:
3550 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3551 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3552 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3553 psp->toc.start_addr = ucode_start_addr;
3554 break;
3555 case PSP_FW_TYPE_PSP_SPL:
3556 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3557 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3558 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3559 psp->spl.start_addr = ucode_start_addr;
3560 break;
3561 case PSP_FW_TYPE_PSP_RL:
3562 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3563 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3564 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3565 psp->rl.start_addr = ucode_start_addr;
3566 break;
3567 case PSP_FW_TYPE_PSP_SOC_DRV:
3568 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3569 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3570 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3571 psp->soc_drv.start_addr = ucode_start_addr;
3572 break;
3573 case PSP_FW_TYPE_PSP_INTF_DRV:
3574 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3575 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3576 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3577 psp->intf_drv.start_addr = ucode_start_addr;
3578 break;
3579 case PSP_FW_TYPE_PSP_DBG_DRV:
3580 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3581 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3582 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3583 psp->dbg_drv.start_addr = ucode_start_addr;
3584 break;
3585 case PSP_FW_TYPE_PSP_RAS_DRV:
3586 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3587 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3588 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3589 psp->ras_drv.start_addr = ucode_start_addr;
3590 break;
3591 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3592 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
3593 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
3594 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3595 psp->ipkeymgr_drv.start_addr = ucode_start_addr;
3596 break;
3597 case PSP_FW_TYPE_PSP_SPDM_DRV:
3598 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version);
3599 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version);
3600 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3601 psp->spdm_drv.start_addr = ucode_start_addr;
3602 break;
3603 default:
3604 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3605 break;
3606 }
3607
3608 return 0;
3609 }
3610
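/* Populate the SYS_DRV and SOS entries from a v1 SOS header. ASICs other
 * than MP0 v13.0.2, or those with XGMI connected to the CPU, use the base
 * layout; MP0 v13.0.2 parts load the alternate (aux) images instead.
 */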
3611 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3612 {
3613 const struct psp_firmware_header_v1_0 *sos_hdr;
3614 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3615 uint8_t *ucode_array_start_addr;
3616
3617 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3618 ucode_array_start_addr = (uint8_t *)sos_hdr +
3619 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3620
3621 if (adev->gmc.xgmi.connected_to_cpu ||
3622 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3623 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3624 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3625
3626 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3627 adev->psp.sys.start_addr = ucode_array_start_addr;
3628
3629 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3630 adev->psp.sos.start_addr = ucode_array_start_addr +
3631 le32_to_cpu(sos_hdr->sos.offset_bytes);
3632 } else {
3633 /* Load alternate PSP SOS FW */
3634 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3635
3636 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3637 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3638
3639 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3640 adev->psp.sys.start_addr = ucode_array_start_addr +
3641 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3642
3643 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3644 adev->psp.sos.start_addr = ucode_array_start_addr +
3645 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3646 }
3647
3648 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3649 dev_warn(adev->dev, "PSP SOS FW not available\n");
3650 return -EINVAL;
3651 }
3652
3653 return 0;
3654 }
3655
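/* Request amdgpu/<chip>_sos.bin (or the _sos_kicker variant) and parse the
 * v1 or v2 header into the individual PSP boot components.
 */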
3656 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3657 {
3658 struct amdgpu_device *adev = psp->adev;
3659 const struct psp_firmware_header_v1_0 *sos_hdr;
3660 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3661 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3662 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3663 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3664 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3665 int fw_index, fw_bin_count, start_index = 0;
3666 const struct psp_fw_bin_desc *fw_bin;
3667 uint8_t *ucode_array_start_addr;
3668 int err = 0;
3669
3670 if (amdgpu_is_kicker_fw(adev))
3671 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3672 "amdgpu/%s_sos_kicker.bin", chip_name);
3673 else
3674 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3675 "amdgpu/%s_sos.bin", chip_name);
3676 if (err)
3677 goto out;
3678
3679 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3680 ucode_array_start_addr = (uint8_t *)sos_hdr +
3681 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3682 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3683
3684 switch (sos_hdr->header.header_version_major) {
3685 case 1:
3686 err = psp_init_sos_base_fw(adev);
3687 if (err)
3688 goto out;
3689
3690 if (sos_hdr->header.header_version_minor == 1) {
3691 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3692 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3693 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3694 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3695 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3696 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3697 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3698 }
3699 if (sos_hdr->header.header_version_minor == 2) {
3700 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3701 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3702 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3703 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3704 }
3705 if (sos_hdr->header.header_version_minor == 3) {
3706 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3707 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3708 adev->psp.toc.start_addr = ucode_array_start_addr +
3709 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3710 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3711 adev->psp.kdb.start_addr = ucode_array_start_addr +
3712 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3713 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3714 adev->psp.spl.start_addr = ucode_array_start_addr +
3715 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3716 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3717 adev->psp.rl.start_addr = ucode_array_start_addr +
3718 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3719 }
3720 break;
3721 case 2:
3722 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3723
3724 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3725
3726 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3727 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3728 err = -EINVAL;
3729 goto out;
3730 }
3731
3732 if (sos_hdr_v2_0->header.header_version_minor == 1) {
3733 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3734
3735 fw_bin = sos_hdr_v2_1->psp_fw_bin;
3736
3737 if (psp_is_aux_sos_load_required(psp))
3738 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3739 else
3740 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3741
3742 } else {
3743 fw_bin = sos_hdr_v2_0->psp_fw_bin;
3744 }
3745
3746 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3747 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3748 sos_hdr_v2_0);
3749 if (err)
3750 goto out;
3751 }
3752 break;
3753 default:
3754 dev_err(adev->dev,
3755 "unsupported psp sos firmware\n");
3756 err = -EINVAL;
3757 goto out;
3758 }
3759
3760 return 0;
3761 out:
3762 amdgpu_ucode_release(&adev->psp.sos_fw);
3763
3764 return err;
3765 }
3766
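/* Filter out TA descriptors that do not apply to this device; currently only
 * used to pick between the XGMI and XGMI_AUX TA on MP0 v13.0.6.
 */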
3767 static bool is_ta_fw_applicable(struct psp_context *psp,
3768 const struct psp_fw_bin_desc *desc)
3769 {
3770 struct amdgpu_device *adev = psp->adev;
3771 uint32_t fw_version;
3772
3773 switch (desc->fw_type) {
3774 case TA_FW_TYPE_PSP_XGMI:
3775 case TA_FW_TYPE_PSP_XGMI_AUX:
3776 /* For now, the AUX TA only exists in the 13.0.6 TA binary,
3777 * starting from v20.00.0x.14
3778 */
3779 if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3780 IP_VERSION(13, 0, 6)) {
3781 fw_version = le32_to_cpu(desc->fw_version);
3782
3783 if (adev->flags & AMD_IS_APU &&
3784 (fw_version & 0xff) >= 0x14)
3785 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3786 else
3787 return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3788 }
3789 break;
3790 default:
3791 break;
3792 }
3793
3794 return true;
3795 }
3796
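/* Decode a single TA binary descriptor from a v2 TA header and record its
 * version, size and start address in the matching TA context.
 */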
3797 static int parse_ta_bin_descriptor(struct psp_context *psp,
3798 const struct psp_fw_bin_desc *desc,
3799 const struct ta_firmware_header_v2_0 *ta_hdr)
3800 {
3801 uint8_t *ucode_start_addr = NULL;
3802
3803 if (!psp || !desc || !ta_hdr)
3804 return -EINVAL;
3805
3806 if (!is_ta_fw_applicable(psp, desc))
3807 return 0;
3808
3809 ucode_start_addr = (uint8_t *)ta_hdr +
3810 le32_to_cpu(desc->offset_bytes) +
3811 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3812
3813 switch (desc->fw_type) {
3814 case TA_FW_TYPE_PSP_ASD:
3815 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3816 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3817 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3818 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3819 break;
3820 case TA_FW_TYPE_PSP_XGMI:
3821 case TA_FW_TYPE_PSP_XGMI_AUX:
3822 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3823 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3824 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3825 break;
3826 case TA_FW_TYPE_PSP_RAS:
3827 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3828 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3829 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3830 break;
3831 case TA_FW_TYPE_PSP_HDCP:
3832 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3833 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3834 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3835 break;
3836 case TA_FW_TYPE_PSP_DTM:
3837 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3838 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3839 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3840 break;
3841 case TA_FW_TYPE_PSP_RAP:
3842 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3843 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3844 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3845 break;
3846 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3847 psp->securedisplay_context.context.bin_desc.fw_version =
3848 le32_to_cpu(desc->fw_version);
3849 psp->securedisplay_context.context.bin_desc.size_bytes =
3850 le32_to_cpu(desc->size_bytes);
3851 psp->securedisplay_context.context.bin_desc.start_addr =
3852 ucode_start_addr;
3853 break;
3854 default:
3855 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3856 break;
3857 }
3858
3859 return 0;
3860 }
3861
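/* v1 TA packages carry fixed XGMI/RAS/HDCP/DTM/SECUREDISPLAY sections at
 * header-defined offsets; copy their locations into the TA contexts.
 */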
3862 static int parse_ta_v1_microcode(struct psp_context *psp)
3863 {
3864 const struct ta_firmware_header_v1_0 *ta_hdr;
3865 struct amdgpu_device *adev = psp->adev;
3866
3867 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3868
3869 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3870 return -EINVAL;
3871
3872 adev->psp.xgmi_context.context.bin_desc.fw_version =
3873 le32_to_cpu(ta_hdr->xgmi.fw_version);
3874 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3875 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3876 adev->psp.xgmi_context.context.bin_desc.start_addr =
3877 (uint8_t *)ta_hdr +
3878 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3879
3880 adev->psp.ras_context.context.bin_desc.fw_version =
3881 le32_to_cpu(ta_hdr->ras.fw_version);
3882 adev->psp.ras_context.context.bin_desc.size_bytes =
3883 le32_to_cpu(ta_hdr->ras.size_bytes);
3884 adev->psp.ras_context.context.bin_desc.start_addr =
3885 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3886 le32_to_cpu(ta_hdr->ras.offset_bytes);
3887
3888 adev->psp.hdcp_context.context.bin_desc.fw_version =
3889 le32_to_cpu(ta_hdr->hdcp.fw_version);
3890 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3891 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3892 adev->psp.hdcp_context.context.bin_desc.start_addr =
3893 (uint8_t *)ta_hdr +
3894 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3895
3896 adev->psp.dtm_context.context.bin_desc.fw_version =
3897 le32_to_cpu(ta_hdr->dtm.fw_version);
3898 adev->psp.dtm_context.context.bin_desc.size_bytes =
3899 le32_to_cpu(ta_hdr->dtm.size_bytes);
3900 adev->psp.dtm_context.context.bin_desc.start_addr =
3901 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3902 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3903
3904 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3905 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3906 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3907 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3908 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3909 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3910 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3911
3912 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3913
3914 return 0;
3915 }
3916
3917 static int parse_ta_v2_microcode(struct psp_context *psp)
3918 {
3919 const struct ta_firmware_header_v2_0 *ta_hdr;
3920 struct amdgpu_device *adev = psp->adev;
3921 int err = 0;
3922 int ta_index = 0;
3923
3924 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3925
3926 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3927 return -EINVAL;
3928
3929 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3930 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3931 return -EINVAL;
3932 }
3933
3934 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3935 err = parse_ta_bin_descriptor(psp,
3936 &ta_hdr->ta_fw_bin[ta_index],
3937 ta_hdr);
3938 if (err)
3939 return err;
3940 }
3941
3942 return 0;
3943 }
3944
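/* Request amdgpu/<chip>_ta.bin (or the _ta_kicker variant) and dispatch to
 * the v1 or v2 parser based on the header major version.
 */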
3945 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3946 {
3947 const struct common_firmware_header *hdr;
3948 struct amdgpu_device *adev = psp->adev;
3949 int err;
3950
3951 if (amdgpu_is_kicker_fw(adev))
3952 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3953 "amdgpu/%s_ta_kicker.bin", chip_name);
3954 else
3955 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
3956 "amdgpu/%s_ta.bin", chip_name);
3957 if (err)
3958 return err;
3959
3960 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3961 switch (le16_to_cpu(hdr->header_version_major)) {
3962 case 1:
3963 err = parse_ta_v1_microcode(psp);
3964 break;
3965 case 2:
3966 err = parse_ta_v2_microcode(psp);
3967 break;
3968 default:
3969 dev_err(adev->dev, "unsupported TA header version\n");
3970 err = -EINVAL;
3971 }
3972
3973 if (err)
3974 amdgpu_ucode_release(&adev->psp.ta_fw);
3975
3976 return err;
3977 }
3978
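/* CAP microcode is only used under SRIOV; it is optional, so a missing
 * amdgpu/<chip>_cap.bin is not treated as an error.
 */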
3979 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3980 {
3981 struct amdgpu_device *adev = psp->adev;
3982 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3983 struct amdgpu_firmware_info *info = NULL;
3984 int err = 0;
3985
3986 if (!amdgpu_sriov_vf(adev)) {
3987 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3988 return -EINVAL;
3989 }
3990
3991 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
3992 "amdgpu/%s_cap.bin", chip_name);
3993 if (err) {
3994 if (err == -ENODEV) {
3995 dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3996 err = 0;
3997 } else {
3998 dev_err(adev->dev, "fail to initialize cap microcode\n");
3999 }
4000 goto out;
4001 }
4002
4003 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
4004 info->ucode_id = AMDGPU_UCODE_ID_CAP;
4005 info->fw = adev->psp.cap_fw;
4006 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
4007 adev->psp.cap_fw->data;
4008 adev->firmware.fw_size += ALIGN(
4009 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
4010 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
4011 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
4012 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
4013
4014 return 0;
4015
4016 out:
4017 amdgpu_ucode_release(&adev->psp.cap_fw);
4018 return err;
4019 }
4020
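/* Send GFX_CMD_ID_CONFIG_SQ_PERFMON to PSP to toggle the SQ core, register
 * and perfmon overrides for one XCP; only supported on MP0 v13.0.6 and
 * skipped entirely under SRIOV.
 */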
4021 int psp_config_sq_perfmon(struct psp_context *psp,
4022 uint32_t xcp_id, bool core_override_enable,
4023 bool reg_override_enable, bool perfmon_override_enable)
4024 {
4025 int ret;
4026
4027 if (amdgpu_sriov_vf(psp->adev))
4028 return 0;
4029
4030 if (xcp_id > MAX_XCP) {
4031 dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
4032 return -EINVAL;
4033 }
4034
4035 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
4036 dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
4037 amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
4038 return -EINVAL;
4039 }
4040 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
4041
4042 cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
4043 cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
4044 cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
4045 cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
4046 cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
4047
4048 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
4049 if (ret)
4050 dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
4051 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
4052
4053 release_psp_cmd_buf(psp);
4054 return ret;
4055 }
4056
4057 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
4058 enum amd_clockgating_state state)
4059 {
4060 return 0;
4061 }
4062
4063 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
4064 enum amd_powergating_state state)
4065 {
4066 return 0;
4067 }
4068
4069 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
4070 struct device_attribute *attr,
4071 char *buf)
4072 {
4073 struct drm_device *ddev = dev_get_drvdata(dev);
4074 struct amdgpu_device *adev = drm_to_adev(ddev);
4075 struct amdgpu_ip_block *ip_block;
4076 uint32_t fw_ver;
4077 int ret;
4078
4079 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4080 if (!ip_block || !ip_block->status.late_initialized) {
4081 dev_info(adev->dev, "PSP block is not ready yet.\n");
4082 return -EBUSY;
4083 }
4084
4085 mutex_lock(&adev->psp.mutex);
4086 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
4087 mutex_unlock(&adev->psp.mutex);
4088
4089 if (ret) {
4090 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
4091 return ret;
4092 }
4093
4094 return sysfs_emit(buf, "%x\n", fw_ver);
4095 }
4096
4097 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
4098 struct device_attribute *attr,
4099 const char *buf,
4100 size_t count)
4101 {
4102 struct drm_device *ddev = dev_get_drvdata(dev);
4103 struct amdgpu_device *adev = drm_to_adev(ddev);
4104 int ret, idx;
4105 const struct firmware *usbc_pd_fw;
4106 struct amdgpu_bo *fw_buf_bo = NULL;
4107 uint64_t fw_pri_mc_addr;
4108 void *fw_pri_cpu_addr;
4109 struct amdgpu_ip_block *ip_block;
4110
4111 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4112 if (!ip_block || !ip_block->status.late_initialized) {
4113 dev_err(adev->dev, "PSP block is not ready yet.\n");
4114 return -EBUSY;
4115 }
4116
4117 if (!drm_dev_enter(ddev, &idx))
4118 return -ENODEV;
4119
4120 ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
4121 "amdgpu/%s", buf);
4122 if (ret)
4123 goto fail;
4124
4125 /* LFB address must be aligned to a 1MB boundary, per PSP requirement */
4126 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
4127 AMDGPU_GEM_DOMAIN_VRAM |
4128 AMDGPU_GEM_DOMAIN_GTT,
4129 &fw_buf_bo, &fw_pri_mc_addr,
4130 &fw_pri_cpu_addr);
4131 if (ret)
4132 goto rel_buf;
4133
4134 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
4135
4136 mutex_lock(&adev->psp.mutex);
4137 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
4138 mutex_unlock(&adev->psp.mutex);
4139
4140 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4141
4142 rel_buf:
4143 amdgpu_ucode_release(&usbc_pd_fw);
4144 fail:
4145 if (ret) {
4146 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
4147 count = ret;
4148 }
4149
4150 drm_dev_exit(idx);
4151 return count;
4152 }
4153
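/* Copy a firmware image into the PSP private (fw_pri) buffer, zero-filling
 * the rest of the 1 MB region first.
 */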
4154 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
4155 {
4156 int idx;
4157
4158 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
4159 return;
4160
4161 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
4162 memcpy(psp->fw_pri_buf, start_addr, bin_size);
4163
4164 drm_dev_exit(idx);
4165 }
4166
4167 /**
4168 * DOC: usbc_pd_fw
4169 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
4170 * this file will trigger the update process.
4171 */
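/*
 * A minimal usage sketch, assuming card0 and a firmware file already placed
 * under /lib/firmware/amdgpu/ (the card index and file name vary per system;
 * echo -n avoids passing a trailing newline in the file name):
 *
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *   echo -n usbc_pd_fw.bin > /sys/class/drm/card0/device/usbc_pd_fw
 */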
4172 static DEVICE_ATTR(usbc_pd_fw, 0644,
4173 psp_usbc_pd_fw_sysfs_read,
4174 psp_usbc_pd_fw_sysfs_write);
4175
4176 int is_psp_fw_valid(struct psp_bin_desc bin)
4177 {
4178 return bin.size_bytes;
4179 }
4180
4181 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
4182 const struct bin_attribute *bin_attr,
4183 char *buffer, loff_t pos, size_t count)
4184 {
4185 struct device *dev = kobj_to_dev(kobj);
4186 struct drm_device *ddev = dev_get_drvdata(dev);
4187 struct amdgpu_device *adev = drm_to_adev(ddev);
4188
4189 adev->psp.vbflash_done = false;
4190
4191 /* Safeguard against memory drain */
4192 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
4193 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
4194 kvfree(adev->psp.vbflash_tmp_buf);
4195 adev->psp.vbflash_tmp_buf = NULL;
4196 adev->psp.vbflash_image_size = 0;
4197 return -ENOMEM;
4198 }
4199
4200 /* TODO Just allocate max for now and optimize to realloc later if needed */
4201 if (!adev->psp.vbflash_tmp_buf) {
4202 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
4203 if (!adev->psp.vbflash_tmp_buf)
4204 return -ENOMEM;
4205 }
4206
4207 mutex_lock(&adev->psp.mutex);
4208 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4209 adev->psp.vbflash_image_size += count;
4210 mutex_unlock(&adev->psp.mutex);
4211
4212 dev_dbg(adev->dev, "IFWI staged for update\n");
4213
4214 return count;
4215 }
4216
4217 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4218 const struct bin_attribute *bin_attr, char *buffer,
4219 loff_t pos, size_t count)
4220 {
4221 struct device *dev = kobj_to_dev(kobj);
4222 struct drm_device *ddev = dev_get_drvdata(dev);
4223 struct amdgpu_device *adev = drm_to_adev(ddev);
4224 struct amdgpu_bo *fw_buf_bo = NULL;
4225 uint64_t fw_pri_mc_addr;
4226 void *fw_pri_cpu_addr;
4227 int ret;
4228
4229 if (adev->psp.vbflash_image_size == 0)
4230 return -EINVAL;
4231
4232 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4233
4234 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4235 AMDGPU_GPU_PAGE_SIZE,
4236 AMDGPU_GEM_DOMAIN_VRAM,
4237 &fw_buf_bo,
4238 &fw_pri_mc_addr,
4239 &fw_pri_cpu_addr);
4240 if (ret)
4241 goto rel_buf;
4242
4243 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4244
4245 mutex_lock(&adev->psp.mutex);
4246 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4247 mutex_unlock(&adev->psp.mutex);
4248
4249 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4250
4251 rel_buf:
4252 kvfree(adev->psp.vbflash_tmp_buf);
4253 adev->psp.vbflash_tmp_buf = NULL;
4254 adev->psp.vbflash_image_size = 0;
4255
4256 if (ret) {
4257 dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4258 return ret;
4259 }
4260
4261 dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4262 return 0;
4263 }
4264
4265 /**
4266 * DOC: psp_vbflash
4267 * Writing to this file will stage an IFWI for update. Reading from this file
4268 * will trigger the update process.
4269 */
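/*
 * A minimal usage sketch, assuming card0 (the sysfs path may differ per
 * system):
 *
 *   cp ifwi_image.bin /sys/class/drm/card0/device/psp_vbflash   # stage image
 *   cat /sys/class/drm/card0/device/psp_vbflash                 # start flash
 *   cat /sys/class/drm/card0/device/psp_vbflash_status          # poll result
 */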
4270 static const struct bin_attribute psp_vbflash_bin_attr = {
4271 .attr = {.name = "psp_vbflash", .mode = 0660},
4272 .size = 0,
4273 .write = amdgpu_psp_vbflash_write,
4274 .read = amdgpu_psp_vbflash_read,
4275 };
4276
4277 /**
4278 * DOC: psp_vbflash_status
4279 * The status of the flash process.
4280 * 0: IFWI flash not complete.
4281 * 1: IFWI flash complete.
4282 */
4283 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4284 struct device_attribute *attr,
4285 char *buf)
4286 {
4287 struct drm_device *ddev = dev_get_drvdata(dev);
4288 struct amdgpu_device *adev = drm_to_adev(ddev);
4289 uint32_t vbflash_status;
4290
4291 vbflash_status = psp_vbflash_status(&adev->psp);
4292 if (!adev->psp.vbflash_done)
4293 vbflash_status = 0;
4294 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4295 vbflash_status = 1;
4296
4297 return sysfs_emit(buf, "0x%x\n", vbflash_status);
4298 }
4299 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4300
4301 static const struct bin_attribute *const bin_flash_attrs[] = {
4302 &psp_vbflash_bin_attr,
4303 NULL
4304 };
4305
4306 static struct attribute *flash_attrs[] = {
4307 &dev_attr_psp_vbflash_status.attr,
4308 &dev_attr_usbc_pd_fw.attr,
4309 NULL
4310 };
4311
4312 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4313 {
4314 struct device *dev = kobj_to_dev(kobj);
4315 struct drm_device *ddev = dev_get_drvdata(dev);
4316 struct amdgpu_device *adev = drm_to_adev(ddev);
4317
4318 if (attr == &dev_attr_usbc_pd_fw.attr)
4319 return adev->psp.sup_pd_fw_up ? 0660 : 0;
4320
4321 return adev->psp.sup_ifwi_up ? 0440 : 0;
4322 }
4323
4324 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4325 const struct bin_attribute *attr,
4326 int idx)
4327 {
4328 struct device *dev = kobj_to_dev(kobj);
4329 struct drm_device *ddev = dev_get_drvdata(dev);
4330 struct amdgpu_device *adev = drm_to_adev(ddev);
4331
4332 return adev->psp.sup_ifwi_up ? 0660 : 0;
4333 }
4334
4335 const struct attribute_group amdgpu_flash_attr_group = {
4336 .attrs = flash_attrs,
4337 .bin_attrs = bin_flash_attrs,
4338 .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4339 .is_visible = amdgpu_flash_attr_is_visible,
4340 };
4341
4342 #if defined(CONFIG_DEBUG_FS)
4343 static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
4344 {
4345 struct amdgpu_device *adev = filp->f_inode->i_private;
4346 struct spirom_bo *bo_triplet;
4347 int ret;
4348
4349 /* serialize open() calls on this file */
4350 if (!mutex_trylock(&adev->psp.mutex))
4351 return -EBUSY;
4352
4353 /*
4354 * Make sure only one userspace process is dumping at a time so that
4355 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
4356 * e.g. when one process tries to open the file while another one has
4357 * already proceeded to read or release. This also eliminates the need
4358 * for a mutex in the read() and release() callbacks.
4359 */
4360 if (adev->psp.spirom_dump_trip) {
4361 mutex_unlock(&adev->psp.mutex);
4362 return -EBUSY;
4363 }
4364
4365 bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
4366 if (!bo_triplet) {
4367 mutex_unlock(&adev->psp.mutex);
4368 return -ENOMEM;
4369 }
4370
4371 ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
4372 AMDGPU_GPU_PAGE_SIZE,
4373 AMDGPU_GEM_DOMAIN_GTT,
4374 &bo_triplet->bo,
4375 &bo_triplet->mc_addr,
4376 &bo_triplet->cpu_addr);
4377 if (ret)
4378 goto rel_trip;
4379
4380 ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
4381 if (ret)
4382 goto rel_bo;
4383
4384 adev->psp.spirom_dump_trip = bo_triplet;
4385 mutex_unlock(&adev->psp.mutex);
4386 return 0;
4387 rel_bo:
4388 amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4389 &bo_triplet->cpu_addr);
4390 rel_trip:
4391 kfree(bo_triplet);
4392 mutex_unlock(&adev->psp.mutex);
4393 dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
4394 return ret;
4395 }
4396
4397 static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
4398 loff_t *pos)
4399 {
4400 struct amdgpu_device *adev = filp->f_inode->i_private;
4401 struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4402
4403 if (!bo_triplet)
4404 return -EINVAL;
4405
4406 return simple_read_from_buffer(buf,
4407 size,
4408 pos, bo_triplet->cpu_addr,
4409 AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4410 }
4411
4412 static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
4413 {
4414 struct amdgpu_device *adev = filp->f_inode->i_private;
4415 struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4416
4417 if (bo_triplet) {
4418 amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4419 &bo_triplet->cpu_addr);
4420 kfree(bo_triplet);
4421 }
4422
4423 adev->psp.spirom_dump_trip = NULL;
4424 return 0;
4425 }
4426
4427 static const struct file_operations psp_dump_spirom_debugfs_ops = {
4428 .owner = THIS_MODULE,
4429 .open = psp_read_spirom_debugfs_open,
4430 .read = psp_read_spirom_debugfs_read,
4431 .release = psp_read_spirom_debugfs_release,
4432 .llseek = default_llseek,
4433 };
4434 #endif
4435
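/* Expose a read-only debugfs node ("psp_spirom_dump") that dumps the SPI ROM
 * contents through psp_dump_spirom_debugfs_ops defined above.
 */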
4436 void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
4437 {
4438 #if defined(CONFIG_DEBUG_FS)
4439 struct drm_minor *minor = adev_to_drm(adev)->primary;
4440
4441 debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
4442 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4443 #endif
4444 }
4445
4446 const struct amd_ip_funcs psp_ip_funcs = {
4447 .name = "psp",
4448 .early_init = psp_early_init,
4449 .sw_init = psp_sw_init,
4450 .sw_fini = psp_sw_fini,
4451 .hw_init = psp_hw_init,
4452 .hw_fini = psp_hw_fini,
4453 .suspend = psp_suspend,
4454 .resume = psp_resume,
4455 .set_clockgating_state = psp_set_clockgating_state,
4456 .set_powergating_state = psp_set_powergating_state,
4457 };
4458
4459 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4460 .type = AMD_IP_BLOCK_TYPE_PSP,
4461 .major = 3,
4462 .minor = 1,
4463 .rev = 0,
4464 .funcs = &psp_ip_funcs,
4465 };
4466
4467 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4468 .type = AMD_IP_BLOCK_TYPE_PSP,
4469 .major = 10,
4470 .minor = 0,
4471 .rev = 0,
4472 .funcs = &psp_ip_funcs,
4473 };
4474
4475 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4476 .type = AMD_IP_BLOCK_TYPE_PSP,
4477 .major = 11,
4478 .minor = 0,
4479 .rev = 0,
4480 .funcs = &psp_ip_funcs,
4481 };
4482
4483 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4484 .type = AMD_IP_BLOCK_TYPE_PSP,
4485 .major = 11,
4486 .minor = 0,
4487 .rev = 8,
4488 .funcs = &psp_ip_funcs,
4489 };
4490
4491 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4492 .type = AMD_IP_BLOCK_TYPE_PSP,
4493 .major = 12,
4494 .minor = 0,
4495 .rev = 0,
4496 .funcs = &psp_ip_funcs,
4497 };
4498
4499 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4500 .type = AMD_IP_BLOCK_TYPE_PSP,
4501 .major = 13,
4502 .minor = 0,
4503 .rev = 0,
4504 .funcs = &psp_ip_funcs,
4505 };
4506
4507 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4508 .type = AMD_IP_BLOCK_TYPE_PSP,
4509 .major = 13,
4510 .minor = 0,
4511 .rev = 4,
4512 .funcs = &psp_ip_funcs,
4513 };
4514
4515 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4516 .type = AMD_IP_BLOCK_TYPE_PSP,
4517 .major = 14,
4518 .minor = 0,
4519 .rev = 0,
4520 .funcs = &psp_ip_funcs,
4521 };
4522