1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46
47 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
48
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52
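/* Allocate the 4 KB ring buffer used for PSP kernel-mode (KM) ring
 * submissions.  The buffer lives in VRAM (falling back to GTT) and its
 * MC address and CPU pointer are stashed in psp->km_ring; on allocation
 * failure the ring size is reset to 0 so later teardown can skip it.
 */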
53 static int psp_ring_init(struct psp_context *psp,
54 enum psp_ring_type ring_type)
55 {
56 int ret = 0;
57 struct psp_ring *ring;
58 struct amdgpu_device *adev = psp->adev;
59
60 ring = &psp->km_ring;
61
62 ring->ring_type = ring_type;
63
64 /* allocate a 4k page of Local Frame Buffer memory for the ring */
65 ring->ring_size = 0x1000;
66 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 AMDGPU_GEM_DOMAIN_VRAM |
68 AMDGPU_GEM_DOMAIN_GTT,
69 &adev->firmware.rbuf,
70 &ring->ring_mem_mc_addr,
71 (void **)&ring->ring_mem);
72 if (ret) {
73 ring->ring_size = 0;
74 return ret;
75 }
76
77 return 0;
78 }
79
80 /*
81 * Due to DF Cstate management being centralized in the PMFW, the firmware
82 * loading sequence is updated as below:
83 * - Load KDB
84 * - Load SYS_DRV
85 * - Load tOS
86 * - Load PMFW
87 * - Setup TMR
88 * - Load other non-psp fw
89 * - Load ASD
90 * - Load XGMI/RAS/HDCP/DTM TA if any
91 *
92 * This new sequence is required for
93 * - Arcturus and onwards
94 */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 struct amdgpu_device *adev = psp->adev;
98
99 if (amdgpu_sriov_vf(adev)) {
100 psp->pmfw_centralized_cstate_management = false;
101 return;
102 }
103
104 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 case IP_VERSION(11, 0, 0):
106 case IP_VERSION(11, 0, 4):
107 case IP_VERSION(11, 0, 5):
108 case IP_VERSION(11, 0, 7):
109 case IP_VERSION(11, 0, 9):
110 case IP_VERSION(11, 0, 11):
111 case IP_VERSION(11, 0, 12):
112 case IP_VERSION(11, 0, 13):
113 case IP_VERSION(13, 0, 0):
114 case IP_VERSION(13, 0, 2):
115 case IP_VERSION(13, 0, 7):
116 psp->pmfw_centralized_cstate_management = true;
117 break;
118 default:
119 psp->pmfw_centralized_cstate_management = false;
120 break;
121 }
122 }
123
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 struct amdgpu_device *adev = psp->adev;
127 char ucode_prefix[30];
128 int ret = 0;
129
130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131
132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 case IP_VERSION(9, 0, 0):
134 case IP_VERSION(11, 0, 7):
135 case IP_VERSION(11, 0, 9):
136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 ret = psp_init_cap_microcode(psp, ucode_prefix);
138 break;
139 case IP_VERSION(13, 0, 2):
140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 ret = psp_init_cap_microcode(psp, ucode_prefix);
142 ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 break;
144 case IP_VERSION(13, 0, 0):
145 adev->virt.autoload_ucode_id = 0;
146 break;
147 case IP_VERSION(13, 0, 6):
148 case IP_VERSION(13, 0, 14):
149 ret = psp_init_cap_microcode(psp, ucode_prefix);
150 ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 break;
152 case IP_VERSION(13, 0, 10):
153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 ret = psp_init_cap_microcode(psp, ucode_prefix);
155 break;
156 default:
157 return -EINVAL;
158 }
159 return ret;
160 }
161
162 static int psp_early_init(void *handle)
163 {
164 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
165 struct psp_context *psp = &adev->psp;
166
167 psp->autoload_supported = true;
168 psp->boot_time_tmr = true;
169
170 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
171 case IP_VERSION(9, 0, 0):
172 psp_v3_1_set_psp_funcs(psp);
173 psp->autoload_supported = false;
174 psp->boot_time_tmr = false;
175 break;
176 case IP_VERSION(10, 0, 0):
177 case IP_VERSION(10, 0, 1):
178 psp_v10_0_set_psp_funcs(psp);
179 psp->autoload_supported = false;
180 psp->boot_time_tmr = false;
181 break;
182 case IP_VERSION(11, 0, 2):
183 case IP_VERSION(11, 0, 4):
184 psp_v11_0_set_psp_funcs(psp);
185 psp->autoload_supported = false;
186 psp->boot_time_tmr = false;
187 break;
188 case IP_VERSION(11, 0, 0):
189 case IP_VERSION(11, 0, 7):
190 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
191 fallthrough;
192 case IP_VERSION(11, 0, 5):
193 case IP_VERSION(11, 0, 9):
194 case IP_VERSION(11, 0, 11):
195 case IP_VERSION(11, 5, 0):
196 case IP_VERSION(11, 0, 12):
197 case IP_VERSION(11, 0, 13):
198 psp_v11_0_set_psp_funcs(psp);
199 psp->boot_time_tmr = false;
200 break;
201 case IP_VERSION(11, 0, 3):
202 case IP_VERSION(12, 0, 1):
203 psp_v12_0_set_psp_funcs(psp);
204 psp->autoload_supported = false;
205 psp->boot_time_tmr = false;
206 break;
207 case IP_VERSION(13, 0, 2):
208 psp->boot_time_tmr = false;
209 fallthrough;
210 case IP_VERSION(13, 0, 6):
211 case IP_VERSION(13, 0, 14):
212 psp_v13_0_set_psp_funcs(psp);
213 psp->autoload_supported = false;
214 break;
215 case IP_VERSION(13, 0, 1):
216 case IP_VERSION(13, 0, 3):
217 case IP_VERSION(13, 0, 5):
218 case IP_VERSION(13, 0, 8):
219 case IP_VERSION(13, 0, 11):
220 case IP_VERSION(14, 0, 0):
221 case IP_VERSION(14, 0, 1):
222 case IP_VERSION(14, 0, 4):
223 psp_v13_0_set_psp_funcs(psp);
224 psp->boot_time_tmr = false;
225 break;
226 case IP_VERSION(11, 0, 8):
227 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
228 psp_v11_0_8_set_psp_funcs(psp);
229 }
230 psp->autoload_supported = false;
231 psp->boot_time_tmr = false;
232 break;
233 case IP_VERSION(13, 0, 0):
234 case IP_VERSION(13, 0, 7):
235 case IP_VERSION(13, 0, 10):
236 psp_v13_0_set_psp_funcs(psp);
237 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
238 psp->boot_time_tmr = false;
239 break;
240 case IP_VERSION(13, 0, 4):
241 psp_v13_0_4_set_psp_funcs(psp);
242 psp->boot_time_tmr = false;
243 break;
244 case IP_VERSION(14, 0, 2):
245 case IP_VERSION(14, 0, 3):
246 psp_v14_0_set_psp_funcs(psp);
247 break;
248 default:
249 return -EINVAL;
250 }
251
252 psp->adev = adev;
253
254 adev->psp_timeout = 20000;
255
256 psp_check_pmfw_centralized_cstate_management(psp);
257
258 if (amdgpu_sriov_vf(adev))
259 return psp_init_sriov_microcode(psp);
260 else
261 return psp_init_microcode(psp);
262 }
263
264 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
265 {
266 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
267 &mem_ctx->shared_buf);
268 mem_ctx->shared_bo = NULL;
269 }
270
271 static void psp_free_shared_bufs(struct psp_context *psp)
272 {
273 void *tmr_buf;
274 void **pptr;
275
276 /* free TMR memory buffer */
277 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
278 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
279 psp->tmr_bo = NULL;
280
281 /* free xgmi shared memory */
282 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
283
284 /* free ras shared memory */
285 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
286
287 /* free hdcp shared memory */
288 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
289
290 /* free dtm shared memory */
291 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
292
293 /* free rap shared memory */
294 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
295
296 /* free securedisplay shared memory */
297 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
298
299
300 }
301
302 static void psp_memory_training_fini(struct psp_context *psp)
303 {
304 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
305
306 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
307 kfree(ctx->sys_cache);
308 ctx->sys_cache = NULL;
309 }
310
311 static int psp_memory_training_init(struct psp_context *psp)
312 {
313 int ret;
314 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
315
316 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
317 dev_dbg(psp->adev->dev, "memory training is not supported!\n");
318 return 0;
319 }
320
321 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
322 if (ctx->sys_cache == NULL) {
323 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
324 ret = -ENOMEM;
325 goto Err_out;
326 }
327
328 dev_dbg(psp->adev->dev,
329 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
330 ctx->train_data_size,
331 ctx->p2c_train_data_offset,
332 ctx->c2p_train_data_offset);
333 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
334 return 0;
335
336 Err_out:
337 psp_memory_training_fini(psp);
338 return ret;
339 }
340
341 /*
342 * Helper function to query a PSP runtime database entry
343 *
344 * @adev: amdgpu_device pointer
345 * @entry_type: the type of psp runtime database entry
346 * @db_entry: runtime database entry pointer
347 *
348 * Return false if the runtime database doesn't exist or the entry is invalid,
349 * or true if the specific database entry is found and copied to @db_entry
350 */
351 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
352 enum psp_runtime_entry_type entry_type,
353 void *db_entry)
354 {
355 uint64_t db_header_pos, db_dir_pos;
356 struct psp_runtime_data_header db_header = {0};
357 struct psp_runtime_data_directory db_dir = {0};
358 bool ret = false;
359 int i;
360
361 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
362 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
363 return false;
364
365 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
366 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
367
368 /* read runtime db header from vram */
369 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
370 sizeof(struct psp_runtime_data_header), false);
371
372 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
373 /* runtime db doesn't exist, exit */
374 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
375 return false;
376 }
377
378 /* read runtime database entry from vram */
379 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
380 sizeof(struct psp_runtime_data_directory), false);
381
382 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
383 /* invalid db entry count, exit */
384 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
385 return false;
386 }
387
388 /* look up for requested entry type */
389 for (i = 0; i < db_dir.entry_count && !ret; i++) {
390 if (db_dir.entry_list[i].entry_type == entry_type) {
391 switch (entry_type) {
392 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
393 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
394 /* invalid db entry size */
395 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
396 return false;
397 }
398 /* read runtime database entry */
399 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
400 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
401 ret = true;
402 break;
403 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
404 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
405 /* invalid db entry size */
406 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
407 return false;
408 }
409 /* read runtime database entry */
410 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
411 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
412 ret = true;
413 break;
414 default:
415 ret = false;
416 break;
417 }
418 }
419 }
420
421 return ret;
422 }
423
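/* Software-side setup: allocate the GFX command buffer, read the PSP
 * runtime database for the SCPM status and boot-config bitmask, run the
 * two-stage memory training path when it is enabled, and create the
 * fw_pri, fence and command BOs used for all later PSP submissions.
 */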
424 static int psp_sw_init(void *handle)
425 {
426 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
427 struct psp_context *psp = &adev->psp;
428 int ret;
429 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
430 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
431 struct psp_runtime_scpm_entry scpm_entry;
432
433 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
434 if (!psp->cmd) {
435 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
436 return -ENOMEM;
437 }
438
439 adev->psp.xgmi_context.supports_extended_data =
440 !adev->gmc.xgmi.connected_to_cpu &&
441 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
442
443 memset(&scpm_entry, 0, sizeof(scpm_entry));
444 if ((psp_get_runtime_db_entry(adev,
445 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
446 &scpm_entry)) &&
447 (scpm_entry.scpm_status != SCPM_DISABLE)) {
448 adev->scpm_enabled = true;
449 adev->scpm_status = scpm_entry.scpm_status;
450 } else {
451 adev->scpm_enabled = false;
452 adev->scpm_status = SCPM_DISABLE;
453 }
454
455 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
456
457 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
458 if (psp_get_runtime_db_entry(adev,
459 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
460 &boot_cfg_entry)) {
461 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
462 if ((psp->boot_cfg_bitmask) &
463 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
464 /* If psp runtime database exists, then
465 * only enable two stage memory training
466 * when TWO_STAGE_DRAM_TRAINING bit is set
467 * in runtime database
468 */
469 mem_training_ctx->enable_mem_training = true;
470 }
471
472 } else {
473 /* If psp runtime database doesn't exist or is
474 * invalid, force enable two stage memory training
475 */
476 mem_training_ctx->enable_mem_training = true;
477 }
478
479 if (mem_training_ctx->enable_mem_training) {
480 ret = psp_memory_training_init(psp);
481 if (ret) {
482 dev_err(adev->dev, "Failed to initialize memory training!\n");
483 return ret;
484 }
485
486 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
487 if (ret) {
488 dev_err(adev->dev, "Failed to process memory training!\n");
489 return ret;
490 }
491 }
492
493 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
494 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
495 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
496 &psp->fw_pri_bo,
497 &psp->fw_pri_mc_addr,
498 &psp->fw_pri_buf);
499 if (ret)
500 return ret;
501
502 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
503 AMDGPU_GEM_DOMAIN_VRAM |
504 AMDGPU_GEM_DOMAIN_GTT,
505 &psp->fence_buf_bo,
506 &psp->fence_buf_mc_addr,
507 &psp->fence_buf);
508 if (ret)
509 goto failed1;
510
511 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
512 AMDGPU_GEM_DOMAIN_VRAM |
513 AMDGPU_GEM_DOMAIN_GTT,
514 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
515 (void **)&psp->cmd_buf_mem);
516 if (ret)
517 goto failed2;
518
519 return 0;
520
521 failed2:
522 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
523 &psp->fence_buf_mc_addr, &psp->fence_buf);
524 failed1:
525 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
526 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
527 return ret;
528 }
529
530 static int psp_sw_fini(void *handle)
531 {
532 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
533 struct psp_context *psp = &adev->psp;
534 struct psp_gfx_cmd_resp *cmd = psp->cmd;
535
536 psp_memory_training_fini(psp);
537
538 amdgpu_ucode_release(&psp->sos_fw);
539 amdgpu_ucode_release(&psp->asd_fw);
540 amdgpu_ucode_release(&psp->ta_fw);
541 amdgpu_ucode_release(&psp->cap_fw);
542 amdgpu_ucode_release(&psp->toc_fw);
543
544 kfree(cmd);
545 cmd = NULL;
546
547 psp_free_shared_bufs(psp);
548
549 if (psp->km_ring.ring_mem)
550 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
551 &psp->km_ring.ring_mem_mc_addr,
552 (void **)&psp->km_ring.ring_mem);
553
554 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
555 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
556 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
557 &psp->fence_buf_mc_addr, &psp->fence_buf);
558 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
559 (void **)&psp->cmd_buf_mem);
560
561 return 0;
562 }
563
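/* Poll @reg_index until the expected condition is met or adev->usec_timeout
 * microseconds elapse.  With @check_changed the loop exits as soon as the
 * register value differs from @reg_val; otherwise it exits when
 * (value & @mask) == @reg_val.  A purely illustrative call (the register
 * offset and values are placeholders, not taken from this file):
 *
 *	ret = psp_wait_for(psp, reg_offset, expected_val, mask, false);
 */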
564 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
565 uint32_t reg_val, uint32_t mask, bool check_changed)
566 {
567 uint32_t val;
568 int i;
569 struct amdgpu_device *adev = psp->adev;
570
571 if (psp->adev->no_hw_access)
572 return 0;
573
574 for (i = 0; i < adev->usec_timeout; i++) {
575 val = RREG32(reg_index);
576 if (check_changed) {
577 if (val != reg_val)
578 return 0;
579 } else {
580 if ((val & mask) == reg_val)
581 return 0;
582 }
583 udelay(1);
584 }
585
586 return -ETIME;
587 }
588
589 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
590 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
591 {
592 uint32_t val;
593 int i;
594 struct amdgpu_device *adev = psp->adev;
595
596 if (psp->adev->no_hw_access)
597 return 0;
598
599 for (i = 0; i < msec_timeout; i++) {
600 val = RREG32(reg_index);
601 if ((val & mask) == reg_val)
602 return 0;
603 msleep(1);
604 }
605
606 return -ETIME;
607 }
608
609 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
610 {
611 switch (cmd_id) {
612 case GFX_CMD_ID_LOAD_TA:
613 return "LOAD_TA";
614 case GFX_CMD_ID_UNLOAD_TA:
615 return "UNLOAD_TA";
616 case GFX_CMD_ID_INVOKE_CMD:
617 return "INVOKE_CMD";
618 case GFX_CMD_ID_LOAD_ASD:
619 return "LOAD_ASD";
620 case GFX_CMD_ID_SETUP_TMR:
621 return "SETUP_TMR";
622 case GFX_CMD_ID_LOAD_IP_FW:
623 return "LOAD_IP_FW";
624 case GFX_CMD_ID_DESTROY_TMR:
625 return "DESTROY_TMR";
626 case GFX_CMD_ID_SAVE_RESTORE:
627 return "SAVE_RESTORE_IP_FW";
628 case GFX_CMD_ID_SETUP_VMR:
629 return "SETUP_VMR";
630 case GFX_CMD_ID_DESTROY_VMR:
631 return "DESTROY_VMR";
632 case GFX_CMD_ID_PROG_REG:
633 return "PROG_REG";
634 case GFX_CMD_ID_GET_FW_ATTESTATION:
635 return "GET_FW_ATTESTATION";
636 case GFX_CMD_ID_LOAD_TOC:
637 return "ID_LOAD_TOC";
638 case GFX_CMD_ID_AUTOLOAD_RLC:
639 return "AUTOLOAD_RLC";
640 case GFX_CMD_ID_BOOT_CFG:
641 return "BOOT_CFG";
642 default:
643 return "UNKNOWN CMD";
644 }
645 }
646
647 static bool psp_err_warn(struct psp_context *psp)
648 {
649 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
650
651 /* This response indicates the reg list is already loaded */
652 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
653 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
654 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
655 cmd->resp.status == TEE_ERROR_CANCEL)
656 return false;
657
658 return true;
659 }
660
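/* Copy the prepared command into the ring-visible command buffer, submit
 * it with a fresh fence value and poll the fence buffer until the PSP
 * acknowledges it or the timeout expires.  The response (including the
 * TMR address for ucode loads) is copied back into @cmd.  Callers
 * typically obtain @cmd via acquire_psp_cmd_buf().
 */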
661 static int
662 psp_cmd_submit_buf(struct psp_context *psp,
663 struct amdgpu_firmware_info *ucode,
664 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
665 {
666 int ret;
667 int index;
668 int timeout = psp->adev->psp_timeout;
669 bool ras_intr = false;
670 bool skip_unsupport = false;
671
672 if (psp->adev->no_hw_access)
673 return 0;
674
675 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
676
677 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
678
679 index = atomic_inc_return(&psp->fence_value);
680 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
681 if (ret) {
682 atomic_dec(&psp->fence_value);
683 goto exit;
684 }
685
686 amdgpu_device_invalidate_hdp(psp->adev, NULL);
687 while (*((unsigned int *)psp->fence_buf) != index) {
688 if (--timeout == 0)
689 break;
690 /*
691 * Don't keep waiting for the timeout when err_event_athub occurs,
692 * because the gpu reset thread has been triggered and the lock
693 * resource should be released for the psp resume sequence.
694 */
695 ras_intr = amdgpu_ras_intr_triggered();
696 if (ras_intr)
697 break;
698 usleep_range(10, 100);
699 amdgpu_device_invalidate_hdp(psp->adev, NULL);
700 }
701
702 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
703 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
704 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
705
706 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
707
708 /* In some cases, the psp response status is not 0 even though there is
709 * no problem while the command is submitted. Some versions of the PSP FW
710 * don't write 0 to that field.
711 * So here we only print a warning instead of an error during psp
712 * initialization to avoid breaking hw_init, and we don't
713 * return -EINVAL.
714 */
715 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
716 if (ucode)
717 dev_warn(psp->adev->dev,
718 "failed to load ucode %s(0x%X) ",
719 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
720 if (psp_err_warn(psp))
721 dev_warn(
722 psp->adev->dev,
723 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
724 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
725 psp->cmd_buf_mem->cmd_id,
726 psp->cmd_buf_mem->resp.status);
727 /* If any firmware (including CAP) load fails under SRIOV, it should
728 * return failure to stop the VF from initializing.
729 * Also return failure in case of timeout
730 */
731 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
732 ret = -EINVAL;
733 goto exit;
734 }
735 }
736
737 if (ucode) {
738 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
739 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
740 }
741
742 exit:
743 return ret;
744 }
745
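/* acquire_psp_cmd_buf()/release_psp_cmd_buf() bracket the GFX commands
 * built in this file: the pair takes psp->mutex, hands out the zeroed
 * psp->cmd buffer, and drops the lock once the command has been submitted.
 * Typical usage, as seen in the helpers below:
 *
 *	cmd = acquire_psp_cmd_buf(psp);
 *	cmd->cmd_id = ...;
 *	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 *	release_psp_cmd_buf(psp);
 */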
746 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
747 {
748 struct psp_gfx_cmd_resp *cmd = psp->cmd;
749
750 mutex_lock(&psp->mutex);
751
752 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
753
754 return cmd;
755 }
756
757 static void release_psp_cmd_buf(struct psp_context *psp)
758 {
759 mutex_unlock(&psp->mutex);
760 }
761
762 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
763 struct psp_gfx_cmd_resp *cmd,
764 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
765 {
766 struct amdgpu_device *adev = psp->adev;
767 uint32_t size = 0;
768 uint64_t tmr_pa = 0;
769
770 if (tmr_bo) {
771 size = amdgpu_bo_size(tmr_bo);
772 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
773 }
774
775 if (amdgpu_sriov_vf(psp->adev))
776 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
777 else
778 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
779 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
780 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
781 cmd->cmd.cmd_setup_tmr.buf_size = size;
782 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
783 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
784 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
785 }
786
787 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
788 uint64_t pri_buf_mc, uint32_t size)
789 {
790 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
791 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
792 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
793 cmd->cmd.cmd_load_toc.toc_size = size;
794 }
795
796 /* Issue the LOAD TOC cmd to the PSP to parse the toc and calculate the tmr size needed */
797 static int psp_load_toc(struct psp_context *psp,
798 uint32_t *tmr_size)
799 {
800 int ret;
801 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
802
803 /* Copy toc to psp firmware private buffer */
804 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
805
806 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
807
808 ret = psp_cmd_submit_buf(psp, NULL, cmd,
809 psp->fence_buf_mc_addr);
810 if (!ret)
811 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
812
813 release_psp_cmd_buf(psp);
814
815 return ret;
816 }
817
818 /* Set up Trusted Memory Region */
819 static int psp_tmr_init(struct psp_context *psp)
820 {
821 int ret = 0;
822 int tmr_size;
823 void *tmr_buf;
824 void **pptr;
825
826 /*
827 * According to the HW engineers, they prefer the TMR address to be "naturally
828 * aligned", i.e. the start address to be an integer multiple of the TMR size.
829 *
830 * Note: this memory needs to stay reserved until the driver
831 * uninitializes.
832 */
833 tmr_size = PSP_TMR_SIZE(psp->adev);
834
835 /* For ASICs that support RLC autoload, the psp will parse the toc
836 * and calculate the total size of TMR needed
837 */
838 if (!amdgpu_sriov_vf(psp->adev) &&
839 psp->toc.start_addr &&
840 psp->toc.size_bytes &&
841 psp->fw_pri_buf) {
842 ret = psp_load_toc(psp, &tmr_size);
843 if (ret) {
844 dev_err(psp->adev->dev, "Failed to load toc\n");
845 return ret;
846 }
847 }
848
849 if (!psp->tmr_bo && !psp->boot_time_tmr) {
850 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
851 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
852 PSP_TMR_ALIGNMENT,
853 AMDGPU_HAS_VRAM(psp->adev) ?
854 AMDGPU_GEM_DOMAIN_VRAM :
855 AMDGPU_GEM_DOMAIN_GTT,
856 &psp->tmr_bo, &psp->tmr_mc_addr,
857 pptr);
858 }
859
860 return ret;
861 }
862
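/* ASICs for which the guest should not manage the TMR under SRIOV;
 * psp_tmr_load() and psp_tmr_unload() consult this list to skip both
 * setup and teardown on these parts.
 */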
863 static bool psp_skip_tmr(struct psp_context *psp)
864 {
865 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
866 case IP_VERSION(11, 0, 9):
867 case IP_VERSION(11, 0, 7):
868 case IP_VERSION(13, 0, 2):
869 case IP_VERSION(13, 0, 6):
870 case IP_VERSION(13, 0, 10):
871 case IP_VERSION(13, 0, 14):
872 return true;
873 default:
874 return false;
875 }
876 }
877
878 static int psp_tmr_load(struct psp_context *psp)
879 {
880 int ret;
881 struct psp_gfx_cmd_resp *cmd;
882
883 /* Under SRIOV, do not set up the TMR on ASICs covered by psp_skip_tmr()
884 * (e.g. Navi12 and Sienna Cichlid); it is already set up by the host driver.
885 */
886 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
887 return 0;
888
889 cmd = acquire_psp_cmd_buf(psp);
890
891 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
892 if (psp->tmr_bo)
893 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
894 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
895
896 ret = psp_cmd_submit_buf(psp, NULL, cmd,
897 psp->fence_buf_mc_addr);
898
899 release_psp_cmd_buf(psp);
900
901 return ret;
902 }
903
904 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
905 struct psp_gfx_cmd_resp *cmd)
906 {
907 if (amdgpu_sriov_vf(psp->adev))
908 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
909 else
910 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
911 }
912
913 static int psp_tmr_unload(struct psp_context *psp)
914 {
915 int ret;
916 struct psp_gfx_cmd_resp *cmd;
917
918 /* Under SRIOV, skip TMR unload on ASICs covered by psp_skip_tmr()
919 * (e.g. Navi12 and Sienna Cichlid), as the TMR is not loaded at all.
920 */
921 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
922 return 0;
923
924 cmd = acquire_psp_cmd_buf(psp);
925
926 psp_prep_tmr_unload_cmd_buf(psp, cmd);
927 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
928
929 ret = psp_cmd_submit_buf(psp, NULL, cmd,
930 psp->fence_buf_mc_addr);
931
932 release_psp_cmd_buf(psp);
933
934 return ret;
935 }
936
937 static int psp_tmr_terminate(struct psp_context *psp)
938 {
939 return psp_tmr_unload(psp);
940 }
941
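/* Ask the PSP for the location of the firmware attestation records
 * database and return the combined 64-bit address through @output_ptr.
 * Not supported (and silently skipped) under SRIOV.
 */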
942 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
943 uint64_t *output_ptr)
944 {
945 int ret;
946 struct psp_gfx_cmd_resp *cmd;
947
948 if (!output_ptr)
949 return -EINVAL;
950
951 if (amdgpu_sriov_vf(psp->adev))
952 return 0;
953
954 cmd = acquire_psp_cmd_buf(psp);
955
956 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
957
958 ret = psp_cmd_submit_buf(psp, NULL, cmd,
959 psp->fence_buf_mc_addr);
960
961 if (!ret) {
962 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
963 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
964 }
965
966 release_psp_cmd_buf(psp);
967
968 return ret;
969 }
970
971 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
972 {
973 struct psp_context *psp = &adev->psp;
974 struct psp_gfx_cmd_resp *cmd;
975 int ret;
976
977 if (amdgpu_sriov_vf(adev))
978 return 0;
979
980 cmd = acquire_psp_cmd_buf(psp);
981
982 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
983 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
984
985 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
986 if (!ret) {
987 *boot_cfg =
988 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
989 }
990
991 release_psp_cmd_buf(psp);
992
993 return ret;
994 }
995
996 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
997 {
998 int ret;
999 struct psp_context *psp = &adev->psp;
1000 struct psp_gfx_cmd_resp *cmd;
1001
1002 if (amdgpu_sriov_vf(adev))
1003 return 0;
1004
1005 cmd = acquire_psp_cmd_buf(psp);
1006
1007 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1008 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1009 cmd->cmd.boot_cfg.boot_config = boot_cfg;
1010 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1011
1012 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1013
1014 release_psp_cmd_buf(psp);
1015
1016 return ret;
1017 }
1018
1019 static int psp_rl_load(struct amdgpu_device *adev)
1020 {
1021 int ret;
1022 struct psp_context *psp = &adev->psp;
1023 struct psp_gfx_cmd_resp *cmd;
1024
1025 if (!is_psp_fw_valid(psp->rl))
1026 return 0;
1027
1028 cmd = acquire_psp_cmd_buf(psp);
1029
1030 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1031 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1032
1033 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1034 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1035 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1036 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1037 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1038
1039 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1040
1041 release_psp_cmd_buf(psp);
1042
1043 return ret;
1044 }
1045
1046 int psp_spatial_partition(struct psp_context *psp, int mode)
1047 {
1048 struct psp_gfx_cmd_resp *cmd;
1049 int ret;
1050
1051 if (amdgpu_sriov_vf(psp->adev))
1052 return 0;
1053
1054 cmd = acquire_psp_cmd_buf(psp);
1055
1056 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1057 cmd->cmd.cmd_spatial_part.mode = mode;
1058
1059 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1060 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1061
1062 release_psp_cmd_buf(psp);
1063
1064 return ret;
1065 }
1066
1067 static int psp_asd_initialize(struct psp_context *psp)
1068 {
1069 int ret;
1070
1071 /* If the PSP version doesn't match the ASD version, ASD loading will fail.
1072 * Add a workaround to bypass it for sriov for now.
1073 * TODO: add a version check to make it common
1074 */
1075 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1076 return 0;
1077
1078 /* bypass asd if display hardware is not available */
1079 if (!amdgpu_device_has_display_hardware(psp->adev) &&
1080 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1081 return 0;
1082
1083 psp->asd_context.mem_context.shared_mc_addr = 0;
1084 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1085 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1086
1087 ret = psp_ta_load(psp, &psp->asd_context);
1088 if (!ret)
1089 psp->asd_context.initialized = true;
1090
1091 return ret;
1092 }
1093
1094 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1095 uint32_t session_id)
1096 {
1097 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1098 cmd->cmd.cmd_unload_ta.session_id = session_id;
1099 }
1100
1101 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1102 {
1103 int ret;
1104 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1105
1106 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1107
1108 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1109
1110 context->resp_status = cmd->resp.status;
1111
1112 release_psp_cmd_buf(psp);
1113
1114 return ret;
1115 }
1116
1117 static int psp_asd_terminate(struct psp_context *psp)
1118 {
1119 int ret;
1120
1121 if (amdgpu_sriov_vf(psp->adev))
1122 return 0;
1123
1124 if (!psp->asd_context.initialized)
1125 return 0;
1126
1127 ret = psp_ta_unload(psp, &psp->asd_context);
1128 if (!ret)
1129 psp->asd_context.initialized = false;
1130
1131 return ret;
1132 }
1133
1134 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1135 uint32_t id, uint32_t value)
1136 {
1137 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1138 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1139 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1140 }
1141
1142 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1143 uint32_t value)
1144 {
1145 struct psp_gfx_cmd_resp *cmd;
1146 int ret = 0;
1147
1148 if (reg >= PSP_REG_LAST)
1149 return -EINVAL;
1150
1151 cmd = acquire_psp_cmd_buf(psp);
1152
1153 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1154 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1155 if (ret)
1156 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1157
1158 release_psp_cmd_buf(psp);
1159
1160 return ret;
1161 }
1162
1163 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1164 uint64_t ta_bin_mc,
1165 struct ta_context *context)
1166 {
1167 cmd->cmd_id = context->ta_load_type;
1168 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1169 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1170 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1171
1172 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1173 lower_32_bits(context->mem_context.shared_mc_addr);
1174 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1175 upper_32_bits(context->mem_context.shared_mc_addr);
1176 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1177 }
1178
1179 int psp_ta_init_shared_buf(struct psp_context *psp,
1180 struct ta_mem_context *mem_ctx)
1181 {
1182 /*
1183 * Allocate 16k of memory, aligned to 4k, from the Frame Buffer (local
1184 * physical) for ta-to-host shared memory
1185 */
1186 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1187 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1188 AMDGPU_GEM_DOMAIN_GTT,
1189 &mem_ctx->shared_bo,
1190 &mem_ctx->shared_mc_addr,
1191 &mem_ctx->shared_buf);
1192 }
1193
1194 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1195 uint32_t ta_cmd_id,
1196 uint32_t session_id)
1197 {
1198 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1199 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1200 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1201 }
1202
1203 int psp_ta_invoke(struct psp_context *psp,
1204 uint32_t ta_cmd_id,
1205 struct ta_context *context)
1206 {
1207 int ret;
1208 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1209
1210 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1211
1212 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1213 psp->fence_buf_mc_addr);
1214
1215 context->resp_status = cmd->resp.status;
1216
1217 release_psp_cmd_buf(psp);
1218
1219 return ret;
1220 }
1221
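/* Generic TA load path: copy the TA binary into the PSP private firmware
 * buffer, issue the load command described by @context (LOAD_TA or
 * LOAD_ASD), and record the session id returned by the PSP on success.
 */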
1222 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1223 {
1224 int ret;
1225 struct psp_gfx_cmd_resp *cmd;
1226
1227 cmd = acquire_psp_cmd_buf(psp);
1228
1229 psp_copy_fw(psp, context->bin_desc.start_addr,
1230 context->bin_desc.size_bytes);
1231
1232 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1233
1234 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1235 psp->fence_buf_mc_addr);
1236
1237 context->resp_status = cmd->resp.status;
1238
1239 if (!ret)
1240 context->session_id = cmd->resp.session_id;
1241
1242 release_psp_cmd_buf(psp);
1243
1244 return ret;
1245 }
1246
1247 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1248 {
1249 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1250 }
1251
1252 int psp_xgmi_terminate(struct psp_context *psp)
1253 {
1254 int ret;
1255 struct amdgpu_device *adev = psp->adev;
1256
1257 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1258 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1259 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1260 adev->gmc.xgmi.connected_to_cpu))
1261 return 0;
1262
1263 if (!psp->xgmi_context.context.initialized)
1264 return 0;
1265
1266 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1267
1268 psp->xgmi_context.context.initialized = false;
1269
1270 return ret;
1271 }
1272
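/* Load the XGMI TA (unless @load_ta is false and an existing session is
 * reused), then invoke TA_COMMAND_XGMI__INITIALIZE to start the session
 * and capture the TA capability flags used by the topology queries below.
 */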
1273 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1274 {
1275 struct ta_xgmi_shared_memory *xgmi_cmd;
1276 int ret;
1277
1278 if (!psp->ta_fw ||
1279 !psp->xgmi_context.context.bin_desc.size_bytes ||
1280 !psp->xgmi_context.context.bin_desc.start_addr)
1281 return -ENOENT;
1282
1283 if (!load_ta)
1284 goto invoke;
1285
1286 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1287 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1288
1289 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1290 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1291 if (ret)
1292 return ret;
1293 }
1294
1295 /* Load XGMI TA */
1296 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1297 if (!ret)
1298 psp->xgmi_context.context.initialized = true;
1299 else
1300 return ret;
1301
1302 invoke:
1303 /* Initialize XGMI session */
1304 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1305 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1306 xgmi_cmd->flag_extend_link_record = set_extended_data;
1307 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1308
1309 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1310 /* note down the capability flag for the XGMI TA */
1311 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1312
1313 return ret;
1314 }
1315
1316 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1317 {
1318 struct ta_xgmi_shared_memory *xgmi_cmd;
1319 int ret;
1320
1321 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1322 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1323
1324 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1325
1326 /* Invoke xgmi ta to get hive id */
1327 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1328 if (ret)
1329 return ret;
1330
1331 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1332
1333 return 0;
1334 }
1335
1336 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1337 {
1338 struct ta_xgmi_shared_memory *xgmi_cmd;
1339 int ret;
1340
1341 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1342 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1343
1344 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1345
1346 /* Invoke xgmi ta to get the node id */
1347 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1348 if (ret)
1349 return ret;
1350
1351 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1352
1353 return 0;
1354 }
1355
1356 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1357 {
1358 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1359 IP_VERSION(13, 0, 2) &&
1360 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1361 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1362 IP_VERSION(13, 0, 6);
1363 }
1364
1365 /*
1366 * Chips that support extended topology information require the driver to
1367 * reflect topology information in the opposite direction. This is
1368 * because the TA has already exceeded its link record limit and if the
1369 * TA holds bi-directional information, the driver would have to do
1370 * multiple fetches instead of just two.
1371 */
1372 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1373 struct psp_xgmi_node_info node_info)
1374 {
1375 struct amdgpu_device *mirror_adev;
1376 struct amdgpu_hive_info *hive;
1377 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1378 uint64_t dst_node_id = node_info.node_id;
1379 uint8_t dst_num_hops = node_info.num_hops;
1380 uint8_t dst_num_links = node_info.num_links;
1381
1382 hive = amdgpu_get_xgmi_hive(psp->adev);
1383 if (WARN_ON(!hive))
1384 return;
1385
1386 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1387 struct psp_xgmi_topology_info *mirror_top_info;
1388 int j;
1389
1390 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1391 continue;
1392
1393 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1394 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1395 if (mirror_top_info->nodes[j].node_id != src_node_id)
1396 continue;
1397
1398 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1399 /*
1400 * prevent re-reflecting a 0 num_links value, since the reflection
1401 * criterion is based on num_hops (direct or indirect).
1402 *
1403 */
1404 if (dst_num_links)
1405 mirror_top_info->nodes[j].num_links = dst_num_links;
1406
1407 break;
1408 }
1409
1410 break;
1411 }
1412
1413 amdgpu_put_xgmi_hive(hive);
1414 }
1415
1416 int psp_xgmi_get_topology_info(struct psp_context *psp,
1417 int number_devices,
1418 struct psp_xgmi_topology_info *topology,
1419 bool get_extended_data)
1420 {
1421 struct ta_xgmi_shared_memory *xgmi_cmd;
1422 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1423 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1424 int i;
1425 int ret;
1426
1427 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1428 return -EINVAL;
1429
1430 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1431 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1432 xgmi_cmd->flag_extend_link_record = get_extended_data;
1433
1434 /* Fill in the shared memory with topology information as input */
1435 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1436 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1437 topology_info_input->num_nodes = number_devices;
1438
1439 for (i = 0; i < topology_info_input->num_nodes; i++) {
1440 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1441 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1442 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1443 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1444 }
1445
1446 /* Invoke xgmi ta to get the topology information */
1447 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1448 if (ret)
1449 return ret;
1450
1451 /* Read the output topology information from the shared memory */
1452 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1453 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1454 for (i = 0; i < topology->num_nodes; i++) {
1455 /* extended data will either be 0 or equal to non-extended data */
1456 if (topology_info_output->nodes[i].num_hops)
1457 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1458
1459 /* non-extended data gets everything here so no need to update */
1460 if (!get_extended_data) {
1461 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1462 topology->nodes[i].is_sharing_enabled =
1463 topology_info_output->nodes[i].is_sharing_enabled;
1464 topology->nodes[i].sdma_engine =
1465 topology_info_output->nodes[i].sdma_engine;
1466 }
1467
1468 }
1469
1470 /* Invoke xgmi ta again to get the link information */
1471 if (psp_xgmi_peer_link_info_supported(psp)) {
1472 struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1473 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1474 bool requires_reflection =
1475 (psp->xgmi_context.supports_extended_data &&
1476 get_extended_data) ||
1477 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1478 IP_VERSION(13, 0, 6) ||
1479 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1480 IP_VERSION(13, 0, 14);
1481 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1482 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1483
1484 /* populate the shared output buffer, rather than the cmd input buffer,
1485 * with node_ids as the input for GET_PEER_LINKS command execution.
1486 * This is required by the xgmi ta implementation of GET_PEER_LINKS;
1487 * the same requirement applies to the GET_EXTEND_PEER_LINKS command.
1488 */
1489 if (ta_port_num_support) {
1490 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1491
1492 for (i = 0; i < topology->num_nodes; i++)
1493 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1494
1495 link_extend_info_output->num_nodes = topology->num_nodes;
1496 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1497 } else {
1498 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1499
1500 for (i = 0; i < topology->num_nodes; i++)
1501 link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1502
1503 link_info_output->num_nodes = topology->num_nodes;
1504 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1505 }
1506
1507 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1508 if (ret)
1509 return ret;
1510
1511 for (i = 0; i < topology->num_nodes; i++) {
1512 uint8_t node_num_links = ta_port_num_support ?
1513 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1514 /* accumulate num_links on extended data */
1515 if (get_extended_data) {
1516 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1517 } else {
1518 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1519 topology->nodes[i].num_links : node_num_links;
1520 }
1521 /* populate the connected port num info if supported and available */
1522 if (ta_port_num_support && topology->nodes[i].num_links) {
1523 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1524 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1525 }
1526
1527 /* reflect the topology information for bi-directionality */
1528 if (requires_reflection && topology->nodes[i].num_hops)
1529 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1530 }
1531 }
1532
1533 return 0;
1534 }
1535
1536 int psp_xgmi_set_topology_info(struct psp_context *psp,
1537 int number_devices,
1538 struct psp_xgmi_topology_info *topology)
1539 {
1540 struct ta_xgmi_shared_memory *xgmi_cmd;
1541 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1542 int i;
1543
1544 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1545 return -EINVAL;
1546
1547 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1548 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1549
1550 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1551 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1552 topology_info_input->num_nodes = number_devices;
1553
1554 for (i = 0; i < topology_info_input->num_nodes; i++) {
1555 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1556 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1557 topology_info_input->nodes[i].is_sharing_enabled = 1;
1558 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1559 }
1560
1561 /* Invoke xgmi ta to set topology information */
1562 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1563 }
1564
1565 // ras begin
1566 static void psp_ras_ta_check_status(struct psp_context *psp)
1567 {
1568 struct ta_ras_shared_memory *ras_cmd =
1569 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1570
1571 switch (ras_cmd->ras_status) {
1572 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1573 dev_warn(psp->adev->dev,
1574 "RAS WARNING: cmd failed due to unsupported ip\n");
1575 break;
1576 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1577 dev_warn(psp->adev->dev,
1578 "RAS WARNING: cmd failed due to unsupported error injection\n");
1579 break;
1580 case TA_RAS_STATUS__SUCCESS:
1581 break;
1582 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1583 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1584 dev_warn(psp->adev->dev,
1585 "RAS WARNING: Inject error to critical region is not allowed\n");
1586 break;
1587 default:
1588 dev_warn(psp->adev->dev,
1589 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1590 break;
1591 }
1592 }
1593
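/* Serialize RAS TA commands under ras_context.mutex: copy the caller's
 * input into the TA shared buffer, invoke the RAS TA, and copy any output
 * (trigger-error status or bad-page address) back to @out on success.
 */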
1594 static int psp_ras_send_cmd(struct psp_context *psp,
1595 enum ras_command cmd_id, void *in, void *out)
1596 {
1597 struct ta_ras_shared_memory *ras_cmd;
1598 uint32_t cmd = cmd_id;
1599 int ret = 0;
1600
1601 if (!in)
1602 return -EINVAL;
1603
1604 mutex_lock(&psp->ras_context.mutex);
1605 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1606 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1607
1608 switch (cmd) {
1609 case TA_RAS_COMMAND__ENABLE_FEATURES:
1610 case TA_RAS_COMMAND__DISABLE_FEATURES:
1611 memcpy(&ras_cmd->ras_in_message,
1612 in, sizeof(ras_cmd->ras_in_message));
1613 break;
1614 case TA_RAS_COMMAND__TRIGGER_ERROR:
1615 memcpy(&ras_cmd->ras_in_message.trigger_error,
1616 in, sizeof(ras_cmd->ras_in_message.trigger_error));
1617 break;
1618 case TA_RAS_COMMAND__QUERY_ADDRESS:
1619 memcpy(&ras_cmd->ras_in_message.address,
1620 in, sizeof(ras_cmd->ras_in_message.address));
1621 break;
1622 default:
1623 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1624 ret = -EINVAL;
1625 goto err_out;
1626 }
1627
1628 ras_cmd->cmd_id = cmd;
1629 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1630
1631 switch (cmd) {
1632 case TA_RAS_COMMAND__TRIGGER_ERROR:
1633 if (!ret && out)
1634 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1635 break;
1636 case TA_RAS_COMMAND__QUERY_ADDRESS:
1637 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1638 ret = -EINVAL;
1639 else if (out)
1640 memcpy(out,
1641 &ras_cmd->ras_out_message.address,
1642 sizeof(ras_cmd->ras_out_message.address));
1643 break;
1644 default:
1645 break;
1646 }
1647
1648 err_out:
1649 mutex_unlock(&psp->ras_context.mutex);
1650
1651 return ret;
1652 }
1653
1654 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1655 {
1656 struct ta_ras_shared_memory *ras_cmd;
1657 int ret;
1658
1659 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1660
1661 /*
1662 * TODO: bypass the loading in sriov for now
1663 */
1664 if (amdgpu_sriov_vf(psp->adev))
1665 return 0;
1666
1667 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1668
1669 if (amdgpu_ras_intr_triggered())
1670 return ret;
1671
1672 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1673 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1674 return -EINVAL;
1675 }
1676
1677 if (!ret) {
1678 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1679 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1680
1681 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1682 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1683 dev_warn(psp->adev->dev,
1684 "RAS internal register access blocked\n");
1685
1686 psp_ras_ta_check_status(psp);
1687 }
1688
1689 return ret;
1690 }
1691
1692 int psp_ras_enable_features(struct psp_context *psp,
1693 union ta_ras_cmd_input *info, bool enable)
1694 {
1695 enum ras_command cmd_id;
1696 int ret;
1697
1698 if (!psp->ras_context.context.initialized || !info)
1699 return -EINVAL;
1700
1701 cmd_id = enable ?
1702 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1703 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1704 if (ret)
1705 return -EINVAL;
1706
1707 return 0;
1708 }
1709
1710 int psp_ras_terminate(struct psp_context *psp)
1711 {
1712 int ret;
1713
1714 /*
1715 * TODO: bypass the terminate in sriov for now
1716 */
1717 if (amdgpu_sriov_vf(psp->adev))
1718 return 0;
1719
1720 if (!psp->ras_context.context.initialized)
1721 return 0;
1722
1723 ret = psp_ta_unload(psp, &psp->ras_context.context);
1724
1725 psp->ras_context.context.initialized = false;
1726
1727 mutex_destroy(&psp->ras_context.mutex);
1728
1729 return ret;
1730 }
1731
1732 int psp_ras_initialize(struct psp_context *psp)
1733 {
1734 int ret;
1735 uint32_t boot_cfg = 0xFF;
1736 struct amdgpu_device *adev = psp->adev;
1737 struct ta_ras_shared_memory *ras_cmd;
1738
1739 /*
1740 * TODO: bypass the initialize in sriov for now
1741 */
1742 if (amdgpu_sriov_vf(adev))
1743 return 0;
1744
1745 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1746 !adev->psp.ras_context.context.bin_desc.start_addr) {
1747 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1748 return 0;
1749 }
1750
1751 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1752 /* query GECC enablement status from boot config
1753 * boot_cfg: 1 = GECC is enabled, 0 = GECC is disabled
1754 */
1755 ret = psp_boot_config_get(adev, &boot_cfg);
1756 if (ret)
1757 dev_warn(adev->dev, "PSP get boot config failed\n");
1758
1759 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1760 if (!boot_cfg) {
1761 dev_info(adev->dev, "GECC is disabled\n");
1762 } else {
1763 /* disable GECC in the next boot cycle if RAS is
1764 * disabled by the module parameter amdgpu_ras_enable
1765 * and/or amdgpu_ras_mask, or if the boot_config_get
1766 * call failed
1767 */
1768 ret = psp_boot_config_set(adev, 0);
1769 if (ret)
1770 dev_warn(adev->dev, "PSP set boot config failed\n");
1771 else
1772 dev_warn(adev->dev, "GECC will be disabled in the next boot cycle if amdgpu_ras_enable and/or amdgpu_ras_mask is set to 0x0\n");
1773 }
1774 } else {
1775 if (boot_cfg == 1) {
1776 dev_info(adev->dev, "GECC is enabled\n");
1777 } else {
1778 /* enable GECC in the next boot cycle if it is disabled
1779 * in the boot config, or force enable GECC if the boot
1780 * configuration could not be retrieved
1781 */
1782 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1783 if (ret)
1784 dev_warn(adev->dev, "PSP set boot config failed\n");
1785 else
1786 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1787 }
1788 }
1789 }
1790
1791 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1792 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1793
1794 if (!psp->ras_context.context.mem_context.shared_buf) {
1795 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1796 if (ret)
1797 return ret;
1798 }
1799
1800 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1801 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1802
1803 if (amdgpu_ras_is_poison_mode_supported(adev))
1804 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1805 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1806 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1807 ras_cmd->ras_in_message.init_flags.xcc_mask =
1808 adev->gfx.xcc_mask;
1809 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1810
1811 ret = psp_ta_load(psp, &psp->ras_context.context);
1812
1813 if (!ret && !ras_cmd->ras_status) {
1814 psp->ras_context.context.initialized = true;
1815 mutex_init(&psp->ras_context.mutex);
1816 } else {
1817 if (ras_cmd->ras_status)
1818 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1819
1820 /* failed to load RAS TA */
1821 psp->ras_context.context.initialized = false;
1822 }
1823
1824 return ret;
1825 }
1826
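/* Ask the RAS TA to inject an error. The per-block instance mask is
 * folded into sub_block_index (kept for backward compatibility) before
 * the TRIGGER_ERROR command is sent.
 */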
1827 int psp_ras_trigger_error(struct psp_context *psp,
1828 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1829 {
1830 struct amdgpu_device *adev = psp->adev;
1831 int ret;
1832 uint32_t dev_mask;
1833 uint32_t ras_status = 0;
1834
1835 if (!psp->ras_context.context.initialized || !info)
1836 return -EINVAL;
1837
1838 switch (info->block_id) {
1839 case TA_RAS_BLOCK__GFX:
1840 dev_mask = GET_MASK(GC, instance_mask);
1841 break;
1842 case TA_RAS_BLOCK__SDMA:
1843 dev_mask = GET_MASK(SDMA0, instance_mask);
1844 break;
1845 case TA_RAS_BLOCK__VCN:
1846 case TA_RAS_BLOCK__JPEG:
1847 dev_mask = GET_MASK(VCN, instance_mask);
1848 break;
1849 default:
1850 dev_mask = instance_mask;
1851 break;
1852 }
1853
1854 /* reuse sub_block_index for backward compatibility */
1855 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1856 dev_mask &= AMDGPU_RAS_INST_MASK;
1857 info->sub_block_index |= dev_mask;
1858
1859 ret = psp_ras_send_cmd(psp,
1860 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
1861 if (ret)
1862 return -EINVAL;
1863
1864 /* If err_event_athub occurs, the error injection was successful;
1865 * however, the return status from the TA is no longer reliable.
1866 */
1867 if (amdgpu_ras_intr_triggered())
1868 return 0;
1869
1870 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1871 return -EACCES;
1872 else if (ras_status)
1873 return -EINVAL;
1874
1875 return 0;
1876 }
1877
1878 int psp_ras_query_address(struct psp_context *psp,
1879 struct ta_ras_query_address_input *addr_in,
1880 struct ta_ras_query_address_output *addr_out)
1881 {
1882 int ret;
1883
1884 if (!psp->ras_context.context.initialized ||
1885 !addr_in || !addr_out)
1886 return -EINVAL;
1887
1888 ret = psp_ras_send_cmd(psp,
1889 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
1890
1891 return ret;
1892 }
1893 // ras end
1894
1895 // HDCP start
1896 static int psp_hdcp_initialize(struct psp_context *psp)
1897 {
1898 int ret;
1899
1900 /*
1901 * TODO: bypass the initialize in sriov for now
1902 */
1903 if (amdgpu_sriov_vf(psp->adev))
1904 return 0;
1905
1906 /* bypass hdcp initialization if dmu is harvested */
1907 if (!amdgpu_device_has_display_hardware(psp->adev))
1908 return 0;
1909
1910 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1911 !psp->hdcp_context.context.bin_desc.start_addr) {
1912 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1913 return 0;
1914 }
1915
1916 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1917 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1918
1919 if (!psp->hdcp_context.context.mem_context.shared_buf) {
1920 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1921 if (ret)
1922 return ret;
1923 }
1924
1925 ret = psp_ta_load(psp, &psp->hdcp_context.context);
1926 if (!ret) {
1927 psp->hdcp_context.context.initialized = true;
1928 mutex_init(&psp->hdcp_context.mutex);
1929 }
1930
1931 return ret;
1932 }
1933
1934 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1935 {
1936 /*
1937 * TODO: bypass the loading in sriov for now
1938 */
1939 if (amdgpu_sriov_vf(psp->adev))
1940 return 0;
1941
1942 if (!psp->hdcp_context.context.initialized)
1943 return 0;
1944
1945 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1946 }
1947
1948 static int psp_hdcp_terminate(struct psp_context *psp)
1949 {
1950 int ret;
1951
1952 /*
1953 * TODO: bypass the terminate in sriov for now
1954 */
1955 if (amdgpu_sriov_vf(psp->adev))
1956 return 0;
1957
1958 if (!psp->hdcp_context.context.initialized)
1959 return 0;
1960
1961 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1962
1963 psp->hdcp_context.context.initialized = false;
1964
1965 return ret;
1966 }
1967 // HDCP end
1968
1969 // DTM start
1970 static int psp_dtm_initialize(struct psp_context *psp)
1971 {
1972 int ret;
1973
1974 /*
1975 * TODO: bypass the initialize in sriov for now
1976 */
1977 if (amdgpu_sriov_vf(psp->adev))
1978 return 0;
1979
1980 /* bypass dtm initialization if dmu is harvested */
1981 if (!amdgpu_device_has_display_hardware(psp->adev))
1982 return 0;
1983
1984 if (!psp->dtm_context.context.bin_desc.size_bytes ||
1985 !psp->dtm_context.context.bin_desc.start_addr) {
1986 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1987 return 0;
1988 }
1989
1990 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1991 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1992
1993 if (!psp->dtm_context.context.mem_context.shared_buf) {
1994 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1995 if (ret)
1996 return ret;
1997 }
1998
1999 ret = psp_ta_load(psp, &psp->dtm_context.context);
2000 if (!ret) {
2001 psp->dtm_context.context.initialized = true;
2002 mutex_init(&psp->dtm_context.mutex);
2003 }
2004
2005 return ret;
2006 }
2007
2008 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2009 {
2010 /*
2011 * TODO: bypass the loading in sriov for now
2012 */
2013 if (amdgpu_sriov_vf(psp->adev))
2014 return 0;
2015
2016 if (!psp->dtm_context.context.initialized)
2017 return 0;
2018
2019 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2020 }
2021
2022 static int psp_dtm_terminate(struct psp_context *psp)
2023 {
2024 int ret;
2025
2026 /*
2027 * TODO: bypass the terminate in sriov for now
2028 */
2029 if (amdgpu_sriov_vf(psp->adev))
2030 return 0;
2031
2032 if (!psp->dtm_context.context.initialized)
2033 return 0;
2034
2035 ret = psp_ta_unload(psp, &psp->dtm_context.context);
2036
2037 psp->dtm_context.context.initialized = false;
2038
2039 return ret;
2040 }
2041 // DTM end
2042
2043 // RAP start
2044 static int psp_rap_initialize(struct psp_context *psp)
2045 {
2046 int ret;
2047 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2048
2049 /*
2050 * TODO: bypass the initialize in sriov for now
2051 */
2052 if (amdgpu_sriov_vf(psp->adev))
2053 return 0;
2054
2055 if (!psp->rap_context.context.bin_desc.size_bytes ||
2056 !psp->rap_context.context.bin_desc.start_addr) {
2057 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2058 return 0;
2059 }
2060
2061 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2062 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2063
2064 if (!psp->rap_context.context.mem_context.shared_buf) {
2065 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2066 if (ret)
2067 return ret;
2068 }
2069
2070 ret = psp_ta_load(psp, &psp->rap_context.context);
2071 if (!ret) {
2072 psp->rap_context.context.initialized = true;
2073 mutex_init(&psp->rap_context.mutex);
2074 } else
2075 return ret;
2076
2077 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2078 if (ret || status != TA_RAP_STATUS__SUCCESS) {
2079 psp_rap_terminate(psp);
2080 /* free rap shared memory */
2081 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2082
2083 dev_warn(psp->adev->dev, "RAP TA initialization failed (%d), status %d.\n",
2084 ret, status);
2085
2086 return ret;
2087 }
2088
2089 return 0;
2090 }
2091
2092 static int psp_rap_terminate(struct psp_context *psp)
2093 {
2094 int ret;
2095
2096 if (!psp->rap_context.context.initialized)
2097 return 0;
2098
2099 ret = psp_ta_unload(psp, &psp->rap_context.context);
2100
2101 psp->rap_context.context.initialized = false;
2102
2103 return ret;
2104 }
2105
2106 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2107 {
2108 struct ta_rap_shared_memory *rap_cmd;
2109 int ret = 0;
2110
2111 if (!psp->rap_context.context.initialized)
2112 return 0;
2113
2114 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2115 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2116 return -EINVAL;
2117
2118 mutex_lock(&psp->rap_context.mutex);
2119
2120 rap_cmd = (struct ta_rap_shared_memory *)
2121 psp->rap_context.context.mem_context.shared_buf;
2122 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2123
2124 rap_cmd->cmd_id = ta_cmd_id;
2125 rap_cmd->validation_method_id = METHOD_A;
2126
2127 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2128 if (ret)
2129 goto out_unlock;
2130
2131 if (status)
2132 *status = rap_cmd->rap_status;
2133
2134 out_unlock:
2135 mutex_unlock(&psp->rap_context.mutex);
2136
2137 return ret;
2138 }
2139 // RAP end
2140
2141 /* securedisplay start */
2142 static int psp_securedisplay_initialize(struct psp_context *psp)
2143 {
2144 int ret;
2145 struct ta_securedisplay_cmd *securedisplay_cmd;
2146
2147 /*
2148 * TODO: bypass the initialize in sriov for now
2149 */
2150 if (amdgpu_sriov_vf(psp->adev))
2151 return 0;
2152
2153 /* bypass securedisplay initialization if dmu is harvested */
2154 if (!amdgpu_device_has_display_hardware(psp->adev))
2155 return 0;
2156
2157 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2158 !psp->securedisplay_context.context.bin_desc.start_addr) {
2159 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2160 return 0;
2161 }
2162
2163 psp->securedisplay_context.context.mem_context.shared_mem_size =
2164 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2165 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2166
2167 if (!psp->securedisplay_context.context.initialized) {
2168 ret = psp_ta_init_shared_buf(psp,
2169 &psp->securedisplay_context.context.mem_context);
2170 if (ret)
2171 return ret;
2172 }
2173
2174 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2175 if (!ret) {
2176 psp->securedisplay_context.context.initialized = true;
2177 mutex_init(&psp->securedisplay_context.mutex);
2178 } else
2179 return ret;
2180
2181 mutex_lock(&psp->securedisplay_context.mutex);
2182
2183 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2184 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2185
2186 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2187
2188 mutex_unlock(&psp->securedisplay_context.mutex);
2189
2190 if (ret) {
2191 psp_securedisplay_terminate(psp);
2192 /* free securedisplay shared memory */
2193 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2194 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialization failed.\n");
2195 return -EINVAL;
2196 }
2197
2198 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2199 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2200 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2201 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2202 /* don't try again */
2203 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2204 }
2205
2206 return 0;
2207 }
2208
2209 static int psp_securedisplay_terminate(struct psp_context *psp)
2210 {
2211 int ret;
2212
2213 /*
2214 * TODO: bypass the terminate in sriov for now
2215 */
2216 if (amdgpu_sriov_vf(psp->adev))
2217 return 0;
2218
2219 if (!psp->securedisplay_context.context.initialized)
2220 return 0;
2221
2222 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2223
2224 psp->securedisplay_context.context.initialized = false;
2225
2226 return ret;
2227 }
2228
2229 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2230 {
2231 int ret;
2232
2233 if (!psp->securedisplay_context.context.initialized)
2234 return -EINVAL;
2235
2236 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2237 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2238 return -EINVAL;
2239
2240 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2241
2242 return ret;
2243 }
2244 /* SECUREDISPLAY end */
2245
2246 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2247 {
2248 struct psp_context *psp = &adev->psp;
2249 int ret = 0;
2250
2251 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2252 ret = psp->funcs->wait_for_bootloader(psp);
2253
2254 return ret;
2255 }
2256
2257 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2258 {
2259 if (psp->funcs &&
2260 psp->funcs->get_ras_capability) {
2261 return psp->funcs->get_ras_capability(psp);
2262 } else {
2263 return false;
2264 }
2265 }
2266
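/* Bring up the PSP: on bare metal, step through the bootloader stages
 * (KDB, SPL, SYS_DRV, SOC/INTF/DBG/RAS/ipkeymgr drivers, SOS), then
 * create the KM ring and set up/load the TMR. When DF Cstate management
 * is centralized in PMFW, the SMU firmware is loaded before the TMR load.
 */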
2267 static int psp_hw_start(struct psp_context *psp)
2268 {
2269 struct amdgpu_device *adev = psp->adev;
2270 int ret;
2271
2272 if (!amdgpu_sriov_vf(adev)) {
2273 if ((is_psp_fw_valid(psp->kdb)) &&
2274 (psp->funcs->bootloader_load_kdb != NULL)) {
2275 ret = psp_bootloader_load_kdb(psp);
2276 if (ret) {
2277 dev_err(adev->dev, "PSP load kdb failed!\n");
2278 return ret;
2279 }
2280 }
2281
2282 if ((is_psp_fw_valid(psp->spl)) &&
2283 (psp->funcs->bootloader_load_spl != NULL)) {
2284 ret = psp_bootloader_load_spl(psp);
2285 if (ret) {
2286 dev_err(adev->dev, "PSP load spl failed!\n");
2287 return ret;
2288 }
2289 }
2290
2291 if ((is_psp_fw_valid(psp->sys)) &&
2292 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2293 ret = psp_bootloader_load_sysdrv(psp);
2294 if (ret) {
2295 dev_err(adev->dev, "PSP load sys drv failed!\n");
2296 return ret;
2297 }
2298 }
2299
2300 if ((is_psp_fw_valid(psp->soc_drv)) &&
2301 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2302 ret = psp_bootloader_load_soc_drv(psp);
2303 if (ret) {
2304 dev_err(adev->dev, "PSP load soc drv failed!\n");
2305 return ret;
2306 }
2307 }
2308
2309 if ((is_psp_fw_valid(psp->intf_drv)) &&
2310 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2311 ret = psp_bootloader_load_intf_drv(psp);
2312 if (ret) {
2313 dev_err(adev->dev, "PSP load intf drv failed!\n");
2314 return ret;
2315 }
2316 }
2317
2318 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2319 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2320 ret = psp_bootloader_load_dbg_drv(psp);
2321 if (ret) {
2322 dev_err(adev->dev, "PSP load dbg drv failed!\n");
2323 return ret;
2324 }
2325 }
2326
2327 if ((is_psp_fw_valid(psp->ras_drv)) &&
2328 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2329 ret = psp_bootloader_load_ras_drv(psp);
2330 if (ret) {
2331 dev_err(adev->dev, "PSP load ras_drv failed!\n");
2332 return ret;
2333 }
2334 }
2335
2336 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2337 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2338 ret = psp_bootloader_load_ipkeymgr_drv(psp);
2339 if (ret) {
2340 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2341 return ret;
2342 }
2343 }
2344
2345 if ((is_psp_fw_valid(psp->sos)) &&
2346 (psp->funcs->bootloader_load_sos != NULL)) {
2347 ret = psp_bootloader_load_sos(psp);
2348 if (ret) {
2349 dev_err(adev->dev, "PSP load sos failed!\n");
2350 return ret;
2351 }
2352 }
2353 }
2354
2355 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2356 if (ret) {
2357 dev_err(adev->dev, "PSP create ring failed!\n");
2358 return ret;
2359 }
2360
2361 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2362 goto skip_pin_bo;
2363
2364 if (!psp->boot_time_tmr || psp->autoload_supported) {
2365 ret = psp_tmr_init(psp);
2366 if (ret) {
2367 dev_err(adev->dev, "PSP tmr init failed!\n");
2368 return ret;
2369 }
2370 }
2371
2372 skip_pin_bo:
2373 /*
2374 * For ASICs with DF Cstate management centralized
2375 * in PMFW, TMR setup should be performed after PMFW
2376 * is loaded and before other non-PSP firmware is loaded.
2377 */
2378 if (psp->pmfw_centralized_cstate_management) {
2379 ret = psp_load_smu_fw(psp);
2380 if (ret)
2381 return ret;
2382 }
2383
2384 if (!psp->boot_time_tmr || !psp->autoload_supported) {
2385 ret = psp_tmr_load(psp);
2386 if (ret) {
2387 dev_err(adev->dev, "PSP load tmr failed!\n");
2388 return ret;
2389 }
2390 }
2391
2392 return 0;
2393 }
2394
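/* Translate an amdgpu ucode id into the PSP GFX firmware type used in
 * the LOAD_IP_FW command.
 */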
2395 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2396 enum psp_gfx_fw_type *type)
2397 {
2398 switch (ucode->ucode_id) {
2399 case AMDGPU_UCODE_ID_CAP:
2400 *type = GFX_FW_TYPE_CAP;
2401 break;
2402 case AMDGPU_UCODE_ID_SDMA0:
2403 *type = GFX_FW_TYPE_SDMA0;
2404 break;
2405 case AMDGPU_UCODE_ID_SDMA1:
2406 *type = GFX_FW_TYPE_SDMA1;
2407 break;
2408 case AMDGPU_UCODE_ID_SDMA2:
2409 *type = GFX_FW_TYPE_SDMA2;
2410 break;
2411 case AMDGPU_UCODE_ID_SDMA3:
2412 *type = GFX_FW_TYPE_SDMA3;
2413 break;
2414 case AMDGPU_UCODE_ID_SDMA4:
2415 *type = GFX_FW_TYPE_SDMA4;
2416 break;
2417 case AMDGPU_UCODE_ID_SDMA5:
2418 *type = GFX_FW_TYPE_SDMA5;
2419 break;
2420 case AMDGPU_UCODE_ID_SDMA6:
2421 *type = GFX_FW_TYPE_SDMA6;
2422 break;
2423 case AMDGPU_UCODE_ID_SDMA7:
2424 *type = GFX_FW_TYPE_SDMA7;
2425 break;
2426 case AMDGPU_UCODE_ID_CP_MES:
2427 *type = GFX_FW_TYPE_CP_MES;
2428 break;
2429 case AMDGPU_UCODE_ID_CP_MES_DATA:
2430 *type = GFX_FW_TYPE_MES_STACK;
2431 break;
2432 case AMDGPU_UCODE_ID_CP_MES1:
2433 *type = GFX_FW_TYPE_CP_MES_KIQ;
2434 break;
2435 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2436 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2437 break;
2438 case AMDGPU_UCODE_ID_CP_CE:
2439 *type = GFX_FW_TYPE_CP_CE;
2440 break;
2441 case AMDGPU_UCODE_ID_CP_PFP:
2442 *type = GFX_FW_TYPE_CP_PFP;
2443 break;
2444 case AMDGPU_UCODE_ID_CP_ME:
2445 *type = GFX_FW_TYPE_CP_ME;
2446 break;
2447 case AMDGPU_UCODE_ID_CP_MEC1:
2448 *type = GFX_FW_TYPE_CP_MEC;
2449 break;
2450 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2451 *type = GFX_FW_TYPE_CP_MEC_ME1;
2452 break;
2453 case AMDGPU_UCODE_ID_CP_MEC2:
2454 *type = GFX_FW_TYPE_CP_MEC;
2455 break;
2456 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2457 *type = GFX_FW_TYPE_CP_MEC_ME2;
2458 break;
2459 case AMDGPU_UCODE_ID_RLC_P:
2460 *type = GFX_FW_TYPE_RLC_P;
2461 break;
2462 case AMDGPU_UCODE_ID_RLC_V:
2463 *type = GFX_FW_TYPE_RLC_V;
2464 break;
2465 case AMDGPU_UCODE_ID_RLC_G:
2466 *type = GFX_FW_TYPE_RLC_G;
2467 break;
2468 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2469 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2470 break;
2471 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2472 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2473 break;
2474 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2475 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2476 break;
2477 case AMDGPU_UCODE_ID_RLC_IRAM:
2478 *type = GFX_FW_TYPE_RLC_IRAM;
2479 break;
2480 case AMDGPU_UCODE_ID_RLC_DRAM:
2481 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2482 break;
2483 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2484 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2485 break;
2486 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2487 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2488 break;
2489 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2490 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2491 break;
2492 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2493 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2494 break;
2495 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2496 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2497 break;
2498 case AMDGPU_UCODE_ID_SMC:
2499 *type = GFX_FW_TYPE_SMU;
2500 break;
2501 case AMDGPU_UCODE_ID_PPTABLE:
2502 *type = GFX_FW_TYPE_PPTABLE;
2503 break;
2504 case AMDGPU_UCODE_ID_UVD:
2505 *type = GFX_FW_TYPE_UVD;
2506 break;
2507 case AMDGPU_UCODE_ID_UVD1:
2508 *type = GFX_FW_TYPE_UVD1;
2509 break;
2510 case AMDGPU_UCODE_ID_VCE:
2511 *type = GFX_FW_TYPE_VCE;
2512 break;
2513 case AMDGPU_UCODE_ID_VCN:
2514 *type = GFX_FW_TYPE_VCN;
2515 break;
2516 case AMDGPU_UCODE_ID_VCN1:
2517 *type = GFX_FW_TYPE_VCN1;
2518 break;
2519 case AMDGPU_UCODE_ID_DMCU_ERAM:
2520 *type = GFX_FW_TYPE_DMCU_ERAM;
2521 break;
2522 case AMDGPU_UCODE_ID_DMCU_INTV:
2523 *type = GFX_FW_TYPE_DMCU_ISR;
2524 break;
2525 case AMDGPU_UCODE_ID_VCN0_RAM:
2526 *type = GFX_FW_TYPE_VCN0_RAM;
2527 break;
2528 case AMDGPU_UCODE_ID_VCN1_RAM:
2529 *type = GFX_FW_TYPE_VCN1_RAM;
2530 break;
2531 case AMDGPU_UCODE_ID_DMCUB:
2532 *type = GFX_FW_TYPE_DMUB;
2533 break;
2534 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2535 case AMDGPU_UCODE_ID_SDMA_RS64:
2536 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2537 break;
2538 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2539 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2540 break;
2541 case AMDGPU_UCODE_ID_IMU_I:
2542 *type = GFX_FW_TYPE_IMU_I;
2543 break;
2544 case AMDGPU_UCODE_ID_IMU_D:
2545 *type = GFX_FW_TYPE_IMU_D;
2546 break;
2547 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2548 *type = GFX_FW_TYPE_RS64_PFP;
2549 break;
2550 case AMDGPU_UCODE_ID_CP_RS64_ME:
2551 *type = GFX_FW_TYPE_RS64_ME;
2552 break;
2553 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2554 *type = GFX_FW_TYPE_RS64_MEC;
2555 break;
2556 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2557 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2558 break;
2559 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2560 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2561 break;
2562 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2563 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2564 break;
2565 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2566 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2567 break;
2568 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2569 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2570 break;
2571 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2572 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2573 break;
2574 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2575 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2576 break;
2577 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2578 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2579 break;
2580 case AMDGPU_UCODE_ID_VPE_CTX:
2581 *type = GFX_FW_TYPE_VPEC_FW1;
2582 break;
2583 case AMDGPU_UCODE_ID_VPE_CTL:
2584 *type = GFX_FW_TYPE_VPEC_FW2;
2585 break;
2586 case AMDGPU_UCODE_ID_VPE:
2587 *type = GFX_FW_TYPE_VPE;
2588 break;
2589 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2590 *type = GFX_FW_TYPE_UMSCH_UCODE;
2591 break;
2592 case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2593 *type = GFX_FW_TYPE_UMSCH_DATA;
2594 break;
2595 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2596 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2597 break;
2598 case AMDGPU_UCODE_ID_P2S_TABLE:
2599 *type = GFX_FW_TYPE_P2S_TABLE;
2600 break;
2601 case AMDGPU_UCODE_ID_JPEG_RAM:
2602 *type = GFX_FW_TYPE_JPEG_RAM;
2603 break;
2604 case AMDGPU_UCODE_ID_ISP:
2605 *type = GFX_FW_TYPE_ISP;
2606 break;
2607 case AMDGPU_UCODE_ID_MAXIMUM:
2608 default:
2609 return -EINVAL;
2610 }
2611
2612 return 0;
2613 }
2614
2615 static void psp_print_fw_hdr(struct psp_context *psp,
2616 struct amdgpu_firmware_info *ucode)
2617 {
2618 struct amdgpu_device *adev = psp->adev;
2619 struct common_firmware_header *hdr;
2620
2621 switch (ucode->ucode_id) {
2622 case AMDGPU_UCODE_ID_SDMA0:
2623 case AMDGPU_UCODE_ID_SDMA1:
2624 case AMDGPU_UCODE_ID_SDMA2:
2625 case AMDGPU_UCODE_ID_SDMA3:
2626 case AMDGPU_UCODE_ID_SDMA4:
2627 case AMDGPU_UCODE_ID_SDMA5:
2628 case AMDGPU_UCODE_ID_SDMA6:
2629 case AMDGPU_UCODE_ID_SDMA7:
2630 hdr = (struct common_firmware_header *)
2631 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2632 amdgpu_ucode_print_sdma_hdr(hdr);
2633 break;
2634 case AMDGPU_UCODE_ID_CP_CE:
2635 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2636 amdgpu_ucode_print_gfx_hdr(hdr);
2637 break;
2638 case AMDGPU_UCODE_ID_CP_PFP:
2639 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2640 amdgpu_ucode_print_gfx_hdr(hdr);
2641 break;
2642 case AMDGPU_UCODE_ID_CP_ME:
2643 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2644 amdgpu_ucode_print_gfx_hdr(hdr);
2645 break;
2646 case AMDGPU_UCODE_ID_CP_MEC1:
2647 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2648 amdgpu_ucode_print_gfx_hdr(hdr);
2649 break;
2650 case AMDGPU_UCODE_ID_RLC_G:
2651 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2652 amdgpu_ucode_print_rlc_hdr(hdr);
2653 break;
2654 case AMDGPU_UCODE_ID_SMC:
2655 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2656 amdgpu_ucode_print_smc_hdr(hdr);
2657 break;
2658 default:
2659 break;
2660 }
2661 }
2662
2663 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2664 struct amdgpu_firmware_info *ucode,
2665 struct psp_gfx_cmd_resp *cmd)
2666 {
2667 int ret;
2668 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2669
2670 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2671 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2672 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2673 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2674
2675 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2676 if (ret)
2677 dev_err(psp->adev->dev, "Unknown firmware type\n");
2678
2679 return ret;
2680 }
2681
2682 int psp_execute_ip_fw_load(struct psp_context *psp,
2683 struct amdgpu_firmware_info *ucode)
2684 {
2685 int ret = 0;
2686 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2687
2688 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2689 if (!ret) {
2690 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2691 psp->fence_buf_mc_addr);
2692 }
2693
2694 release_psp_cmd_buf(psp);
2695
2696 return ret;
2697 }
2698
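/* Load the P2S table through the PSP. Skipped on BACO/BAMACO runtime
 * resume, under SR-IOV, when the firmware is missing, and on SOS
 * versions that do not support it.
 */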
2699 static int psp_load_p2s_table(struct psp_context *psp)
2700 {
2701 int ret;
2702 struct amdgpu_device *adev = psp->adev;
2703 struct amdgpu_firmware_info *ucode =
2704 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2705
2706 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2707 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2708 return 0;
2709
2710 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2711 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2712 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2713 0x0036003C;
2714 if (psp->sos.fw_version < supp_vers)
2715 return 0;
2716 }
2717
2718 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2719 return 0;
2720
2721 ret = psp_execute_ip_fw_load(psp, ucode);
2722
2723 return ret;
2724 }
2725
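/* Load the SMU firmware through the PSP. Skipped on BACO/BAMACO runtime
 * resume and under SR-IOV; on selected ASICs a reset with RAS enabled
 * first moves MP1 to the UNLOAD state.
 */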
2726 static int psp_load_smu_fw(struct psp_context *psp)
2727 {
2728 int ret;
2729 struct amdgpu_device *adev = psp->adev;
2730 struct amdgpu_firmware_info *ucode =
2731 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2732 struct amdgpu_ras *ras = psp->ras_context.ras;
2733
2734 /*
2735 * Skip SMU FW reloading when BACO is used for runtime PM only,
2736 * as the SMU stays alive in that case.
2737 */
2738 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2739 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2740 return 0;
2741
2742 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2743 return 0;
2744
2745 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2746 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2747 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2748 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2749 if (ret)
2750 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2751 }
2752
2753 ret = psp_execute_ip_fw_load(psp, ucode);
2754
2755 if (ret)
2756 dev_err(adev->dev, "PSP load smu failed!\n");
2757
2758 return ret;
2759 }
2760
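/* Decide whether a ucode entry should be skipped by the PSP loader:
 * empty entries, the P2S table, SMC firmware when a reload quirk,
 * autoload, or centralized cstate management applies, SR-IOV
 * exclusions, and MEC JT images when autoload is enabled.
 */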
2761 static bool fw_load_skip_check(struct psp_context *psp,
2762 struct amdgpu_firmware_info *ucode)
2763 {
2764 if (!ucode->fw || !ucode->ucode_size)
2765 return true;
2766
2767 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2768 return true;
2769
2770 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2771 (psp_smu_reload_quirk(psp) ||
2772 psp->autoload_supported ||
2773 psp->pmfw_centralized_cstate_management))
2774 return true;
2775
2776 if (amdgpu_sriov_vf(psp->adev) &&
2777 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2778 return true;
2779
2780 if (psp->autoload_supported &&
2781 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2782 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2783 /* skip mec JT when autoload is enabled */
2784 return true;
2785
2786 return false;
2787 }
2788
2789 int psp_load_fw_list(struct psp_context *psp,
2790 struct amdgpu_firmware_info **ucode_list, int ucode_count)
2791 {
2792 int ret = 0, i;
2793 struct amdgpu_firmware_info *ucode;
2794
2795 for (i = 0; i < ucode_count; ++i) {
2796 ucode = ucode_list[i];
2797 psp_print_fw_hdr(psp, ucode);
2798 ret = psp_execute_ip_fw_load(psp, ucode);
2799 if (ret)
2800 return ret;
2801 }
2802 return ret;
2803 }
2804
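/* Load all non-PSP firmwares through the PSP, honoring the skip checks
 * above, and kick off RLC autoload once the trigger ucode has been
 * submitted.
 */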
2805 static int psp_load_non_psp_fw(struct psp_context *psp)
2806 {
2807 int i, ret;
2808 struct amdgpu_firmware_info *ucode;
2809 struct amdgpu_device *adev = psp->adev;
2810
2811 if (psp->autoload_supported &&
2812 !psp->pmfw_centralized_cstate_management) {
2813 ret = psp_load_smu_fw(psp);
2814 if (ret)
2815 return ret;
2816 }
2817
2818 /* Load P2S table first if it's available */
2819 psp_load_p2s_table(psp);
2820
2821 for (i = 0; i < adev->firmware.max_ucodes; i++) {
2822 ucode = &adev->firmware.ucode[i];
2823
2824 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2825 !fw_load_skip_check(psp, ucode)) {
2826 ret = psp_load_smu_fw(psp);
2827 if (ret)
2828 return ret;
2829 continue;
2830 }
2831
2832 if (fw_load_skip_check(psp, ucode))
2833 continue;
2834
2835 if (psp->autoload_supported &&
2836 (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2837 IP_VERSION(11, 0, 7) ||
2838 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2839 IP_VERSION(11, 0, 11) ||
2840 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2841 IP_VERSION(11, 0, 12)) &&
2842 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2843 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2844 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2845 /* PSP only receives one SDMA fw for sienna_cichlid,
2846 * as all four SDMA firmwares are the same
2847 */
2848 continue;
2849
2850 psp_print_fw_hdr(psp, ucode);
2851
2852 ret = psp_execute_ip_fw_load(psp, ucode);
2853 if (ret)
2854 return ret;
2855
2856 /* Start RLC autoload after PSP has received all the GFX firmware */
2857 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2858 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2859 ret = psp_rlc_autoload_start(psp);
2860 if (ret) {
2861 dev_err(adev->dev, "Failed to start rlc autoload\n");
2862 return ret;
2863 }
2864 }
2865 }
2866
2867 return 0;
2868 }
2869
2870 static int psp_load_fw(struct amdgpu_device *adev)
2871 {
2872 int ret;
2873 struct psp_context *psp = &adev->psp;
2874
2875 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2876 /* should not destroy ring, only stop */
2877 psp_ring_stop(psp, PSP_RING_TYPE__KM);
2878 } else {
2879 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2880
2881 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2882 if (ret) {
2883 dev_err(adev->dev, "PSP ring init failed!\n");
2884 goto failed;
2885 }
2886 }
2887
2888 ret = psp_hw_start(psp);
2889 if (ret)
2890 goto failed;
2891
2892 ret = psp_load_non_psp_fw(psp);
2893 if (ret)
2894 goto failed1;
2895
2896 ret = psp_asd_initialize(psp);
2897 if (ret) {
2898 dev_err(adev->dev, "PSP load asd failed!\n");
2899 goto failed1;
2900 }
2901
2902 ret = psp_rl_load(adev);
2903 if (ret) {
2904 dev_err(adev->dev, "PSP load RL failed!\n");
2905 goto failed1;
2906 }
2907
2908 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2909 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2910 ret = psp_xgmi_initialize(psp, false, true);
2911 /* Warn about an XGMI session initialization failure
2912 * instead of stopping driver initialization
2913 */
2914 if (ret)
2915 dev_err(psp->adev->dev,
2916 "XGMI: Failed to initialize XGMI session\n");
2917 }
2918 }
2919
2920 if (psp->ta_fw) {
2921 ret = psp_ras_initialize(psp);
2922 if (ret)
2923 dev_err(psp->adev->dev,
2924 "RAS: Failed to initialize RAS\n");
2925
2926 ret = psp_hdcp_initialize(psp);
2927 if (ret)
2928 dev_err(psp->adev->dev,
2929 "HDCP: Failed to initialize HDCP\n");
2930
2931 ret = psp_dtm_initialize(psp);
2932 if (ret)
2933 dev_err(psp->adev->dev,
2934 "DTM: Failed to initialize DTM\n");
2935
2936 ret = psp_rap_initialize(psp);
2937 if (ret)
2938 dev_err(psp->adev->dev,
2939 "RAP: Failed to initialize RAP\n");
2940
2941 ret = psp_securedisplay_initialize(psp);
2942 if (ret)
2943 dev_err(psp->adev->dev,
2944 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2945 }
2946
2947 return 0;
2948
2949 failed1:
2950 psp_free_shared_bufs(psp);
2951 failed:
2952 /*
2953 * all cleanup jobs (xgmi terminate, ras terminate,
2954 * ring destroy, cmd/fence/fw buffers destroy,
2955 * psp->cmd destroy) are deferred to psp_hw_fini
2956 */
2957 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2958 return ret;
2959 }
2960
2961 static int psp_hw_init(void *handle)
2962 {
2963 int ret;
2964 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2965
2966 mutex_lock(&adev->firmware.mutex);
2967 /*
2968 * This sequence is only used once during hw_init; it is not
2969 * needed on resume.
2970 */
2971 ret = amdgpu_ucode_init_bo(adev);
2972 if (ret)
2973 goto failed;
2974
2975 ret = psp_load_fw(adev);
2976 if (ret) {
2977 dev_err(adev->dev, "PSP firmware loading failed\n");
2978 goto failed;
2979 }
2980
2981 mutex_unlock(&adev->firmware.mutex);
2982 return 0;
2983
2984 failed:
2985 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2986 mutex_unlock(&adev->firmware.mutex);
2987 return -EINVAL;
2988 }
2989
2990 static int psp_hw_fini(void *handle)
2991 {
2992 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2993 struct psp_context *psp = &adev->psp;
2994
2995 if (psp->ta_fw) {
2996 psp_ras_terminate(psp);
2997 psp_securedisplay_terminate(psp);
2998 psp_rap_terminate(psp);
2999 psp_dtm_terminate(psp);
3000 psp_hdcp_terminate(psp);
3001
3002 if (adev->gmc.xgmi.num_physical_nodes > 1)
3003 psp_xgmi_terminate(psp);
3004 }
3005
3006 psp_asd_terminate(psp);
3007 psp_tmr_terminate(psp);
3008
3009 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3010
3011 return 0;
3012 }
3013
3014 static int psp_suspend(void *handle)
3015 {
3016 int ret = 0;
3017 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3018 struct psp_context *psp = &adev->psp;
3019
3020 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3021 psp->xgmi_context.context.initialized) {
3022 ret = psp_xgmi_terminate(psp);
3023 if (ret) {
3024 dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3025 goto out;
3026 }
3027 }
3028
3029 if (psp->ta_fw) {
3030 ret = psp_ras_terminate(psp);
3031 if (ret) {
3032 dev_err(adev->dev, "Failed to terminate ras ta\n");
3033 goto out;
3034 }
3035 ret = psp_hdcp_terminate(psp);
3036 if (ret) {
3037 dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3038 goto out;
3039 }
3040 ret = psp_dtm_terminate(psp);
3041 if (ret) {
3042 dev_err(adev->dev, "Failed to terminate dtm ta\n");
3043 goto out;
3044 }
3045 ret = psp_rap_terminate(psp);
3046 if (ret) {
3047 dev_err(adev->dev, "Failed to terminate rap ta\n");
3048 goto out;
3049 }
3050 ret = psp_securedisplay_terminate(psp);
3051 if (ret) {
3052 dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3053 goto out;
3054 }
3055 }
3056
3057 ret = psp_asd_terminate(psp);
3058 if (ret) {
3059 dev_err(adev->dev, "Failed to terminate asd\n");
3060 goto out;
3061 }
3062
3063 ret = psp_tmr_terminate(psp);
3064 if (ret) {
3065 dev_err(adev->dev, "Failed to terminate tmr\n");
3066 goto out;
3067 }
3068
3069 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3070 if (ret)
3071 dev_err(adev->dev, "PSP ring stop failed\n");
3072
3073 out:
3074 return ret;
3075 }
3076
3077 static int psp_resume(void *handle)
3078 {
3079 int ret;
3080 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3081 struct psp_context *psp = &adev->psp;
3082
3083 dev_info(adev->dev, "PSP is resuming...\n");
3084
3085 if (psp->mem_train_ctx.enable_mem_training) {
3086 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3087 if (ret) {
3088 dev_err(adev->dev, "Failed to process memory training!\n");
3089 return ret;
3090 }
3091 }
3092
3093 mutex_lock(&adev->firmware.mutex);
3094
3095 ret = psp_hw_start(psp);
3096 if (ret)
3097 goto failed;
3098
3099 ret = psp_load_non_psp_fw(psp);
3100 if (ret)
3101 goto failed;
3102
3103 ret = psp_asd_initialize(psp);
3104 if (ret) {
3105 dev_err(adev->dev, "PSP load asd failed!\n");
3106 goto failed;
3107 }
3108
3109 ret = psp_rl_load(adev);
3110 if (ret) {
3111 dev_err(adev->dev, "PSP load RL failed!\n");
3112 goto failed;
3113 }
3114
3115 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3116 ret = psp_xgmi_initialize(psp, false, true);
3117 /* Warn about an XGMI session initialization failure
3118 * instead of stopping driver initialization
3119 */
3120 if (ret)
3121 dev_err(psp->adev->dev,
3122 "XGMI: Failed to initialize XGMI session\n");
3123 }
3124
3125 if (psp->ta_fw) {
3126 ret = psp_ras_initialize(psp);
3127 if (ret)
3128 dev_err(psp->adev->dev,
3129 "RAS: Failed to initialize RAS\n");
3130
3131 ret = psp_hdcp_initialize(psp);
3132 if (ret)
3133 dev_err(psp->adev->dev,
3134 "HDCP: Failed to initialize HDCP\n");
3135
3136 ret = psp_dtm_initialize(psp);
3137 if (ret)
3138 dev_err(psp->adev->dev,
3139 "DTM: Failed to initialize DTM\n");
3140
3141 ret = psp_rap_initialize(psp);
3142 if (ret)
3143 dev_err(psp->adev->dev,
3144 "RAP: Failed to initialize RAP\n");
3145
3146 ret = psp_securedisplay_initialize(psp);
3147 if (ret)
3148 dev_err(psp->adev->dev,
3149 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3150 }
3151
3152 mutex_unlock(&adev->firmware.mutex);
3153
3154 return 0;
3155
3156 failed:
3157 dev_err(adev->dev, "PSP resume failed\n");
3158 mutex_unlock(&adev->firmware.mutex);
3159 return ret;
3160 }
3161
3162 int psp_gpu_reset(struct amdgpu_device *adev)
3163 {
3164 int ret;
3165
3166 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3167 return 0;
3168
3169 mutex_lock(&adev->psp.mutex);
3170 ret = psp_mode1_reset(&adev->psp);
3171 mutex_unlock(&adev->psp.mutex);
3172
3173 return ret;
3174 }
3175
3176 int psp_rlc_autoload_start(struct psp_context *psp)
3177 {
3178 int ret;
3179 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3180
3181 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3182
3183 ret = psp_cmd_submit_buf(psp, NULL, cmd,
3184 psp->fence_buf_mc_addr);
3185
3186 release_psp_cmd_buf(psp);
3187
3188 return ret;
3189 }
3190
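/* Write one GFX ring-buffer frame (command/fence addresses and fence
 * value) at the current write pointer, flush HDP, and advance the
 * write pointer in dwords.
 */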
3191 int psp_ring_cmd_submit(struct psp_context *psp,
3192 uint64_t cmd_buf_mc_addr,
3193 uint64_t fence_mc_addr,
3194 int index)
3195 {
3196 unsigned int psp_write_ptr_reg = 0;
3197 struct psp_gfx_rb_frame *write_frame;
3198 struct psp_ring *ring = &psp->km_ring;
3199 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3200 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3201 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3202 struct amdgpu_device *adev = psp->adev;
3203 uint32_t ring_size_dw = ring->ring_size / 4;
3204 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3205
3206 /* KM (GPCOM) prepare write pointer */
3207 psp_write_ptr_reg = psp_ring_get_wptr(psp);
3208
3209 /* Update KM RB frame pointer to new frame */
3210 /* write_frame ptr increments by size of rb_frame in bytes */
3211 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3212 if ((psp_write_ptr_reg % ring_size_dw) == 0)
3213 write_frame = ring_buffer_start;
3214 else
3215 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3216 /* Check invalid write_frame ptr address */
3217 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3218 dev_err(adev->dev,
3219 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3220 ring_buffer_start, ring_buffer_end, write_frame);
3221 dev_err(adev->dev,
3222 "write_frame is pointing to address out of bounds\n");
3223 return -EINVAL;
3224 }
3225
3226 /* Initialize KM RB frame */
3227 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3228
3229 /* Update KM RB frame */
3230 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3231 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3232 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3233 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3234 write_frame->fence_value = index;
3235 amdgpu_device_flush_hdp(adev, NULL);
3236
3237 /* Update the write Pointer in DWORDs */
3238 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3239 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3240 return 0;
3241 }
3242
3243 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3244 {
3245 struct amdgpu_device *adev = psp->adev;
3246 const struct psp_firmware_header_v1_0 *asd_hdr;
3247 int err = 0;
3248
3249 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
3250 if (err)
3251 goto out;
3252
3253 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3254 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3255 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3256 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3257 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3258 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3259 return 0;
3260 out:
3261 amdgpu_ucode_release(&adev->psp.asd_fw);
3262 return err;
3263 }
3264
3265 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3266 {
3267 struct amdgpu_device *adev = psp->adev;
3268 const struct psp_firmware_header_v1_0 *toc_hdr;
3269 int err = 0;
3270
3271 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
3272 if (err)
3273 goto out;
3274
3275 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3276 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3277 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3278 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3279 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3280 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3281 return 0;
3282 out:
3283 amdgpu_ucode_release(&adev->psp.toc_fw);
3284 return err;
3285 }
3286
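/* Record the version, size, and start address of one packed PSP binary
 * described by a v2 SOS firmware header descriptor.
 */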
3287 static int parse_sos_bin_descriptor(struct psp_context *psp,
3288 const struct psp_fw_bin_desc *desc,
3289 const struct psp_firmware_header_v2_0 *sos_hdr)
3290 {
3291 uint8_t *ucode_start_addr = NULL;
3292
3293 if (!psp || !desc || !sos_hdr)
3294 return -EINVAL;
3295
3296 ucode_start_addr = (uint8_t *)sos_hdr +
3297 le32_to_cpu(desc->offset_bytes) +
3298 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3299
3300 switch (desc->fw_type) {
3301 case PSP_FW_TYPE_PSP_SOS:
3302 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3303 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3304 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3305 psp->sos.start_addr = ucode_start_addr;
3306 break;
3307 case PSP_FW_TYPE_PSP_SYS_DRV:
3308 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3309 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3310 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3311 psp->sys.start_addr = ucode_start_addr;
3312 break;
3313 case PSP_FW_TYPE_PSP_KDB:
3314 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3315 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3316 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3317 psp->kdb.start_addr = ucode_start_addr;
3318 break;
3319 case PSP_FW_TYPE_PSP_TOC:
3320 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3321 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3322 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3323 psp->toc.start_addr = ucode_start_addr;
3324 break;
3325 case PSP_FW_TYPE_PSP_SPL:
3326 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3327 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3328 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3329 psp->spl.start_addr = ucode_start_addr;
3330 break;
3331 case PSP_FW_TYPE_PSP_RL:
3332 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3333 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3334 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3335 psp->rl.start_addr = ucode_start_addr;
3336 break;
3337 case PSP_FW_TYPE_PSP_SOC_DRV:
3338 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3339 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3340 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3341 psp->soc_drv.start_addr = ucode_start_addr;
3342 break;
3343 case PSP_FW_TYPE_PSP_INTF_DRV:
3344 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3345 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3346 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3347 psp->intf_drv.start_addr = ucode_start_addr;
3348 break;
3349 case PSP_FW_TYPE_PSP_DBG_DRV:
3350 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3351 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3352 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3353 psp->dbg_drv.start_addr = ucode_start_addr;
3354 break;
3355 case PSP_FW_TYPE_PSP_RAS_DRV:
3356 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3357 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3358 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3359 psp->ras_drv.start_addr = ucode_start_addr;
3360 break;
3361 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3362 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
3363 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
3364 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3365 psp->ipkeymgr_drv.start_addr = ucode_start_addr;
3366 break;
3367 default:
3368 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3369 break;
3370 }
3371
3372 return 0;
3373 }
3374
3375 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3376 {
3377 const struct psp_firmware_header_v1_0 *sos_hdr;
3378 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3379 uint8_t *ucode_array_start_addr;
3380
3381 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3382 ucode_array_start_addr = (uint8_t *)sos_hdr +
3383 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3384
3385 if (adev->gmc.xgmi.connected_to_cpu ||
3386 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3387 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3388 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3389
3390 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3391 adev->psp.sys.start_addr = ucode_array_start_addr;
3392
3393 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3394 adev->psp.sos.start_addr = ucode_array_start_addr +
3395 le32_to_cpu(sos_hdr->sos.offset_bytes);
3396 } else {
3397 /* Load alternate PSP SOS FW */
3398 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3399
3400 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3401 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3402
3403 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3404 adev->psp.sys.start_addr = ucode_array_start_addr +
3405 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3406
3407 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3408 adev->psp.sos.start_addr = ucode_array_start_addr +
3409 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3410 }
3411
3412 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3413 dev_warn(adev->dev, "PSP SOS FW not available");
3414 return -EINVAL;
3415 }
3416
3417 return 0;
3418 }
3419
3420 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3421 {
3422 struct amdgpu_device *adev = psp->adev;
3423 const struct psp_firmware_header_v1_0 *sos_hdr;
3424 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3425 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3426 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3427 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3428 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3429 int fw_index, fw_bin_count, start_index = 0;
3430 const struct psp_fw_bin_desc *fw_bin;
3431 uint8_t *ucode_array_start_addr;
3432 int err = 0;
3433
3434 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
3435 if (err)
3436 goto out;
3437
3438 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3439 ucode_array_start_addr = (uint8_t *)sos_hdr +
3440 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3441 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3442
3443 switch (sos_hdr->header.header_version_major) {
3444 case 1:
3445 err = psp_init_sos_base_fw(adev);
3446 if (err)
3447 goto out;
3448
3449 if (sos_hdr->header.header_version_minor == 1) {
3450 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3451 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3452 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3453 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3454 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3455 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3456 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3457 }
3458 if (sos_hdr->header.header_version_minor == 2) {
3459 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3460 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3461 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3462 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3463 }
3464 if (sos_hdr->header.header_version_minor == 3) {
3465 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3466 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3467 adev->psp.toc.start_addr = ucode_array_start_addr +
3468 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3469 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3470 adev->psp.kdb.start_addr = ucode_array_start_addr +
3471 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3472 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3473 adev->psp.spl.start_addr = ucode_array_start_addr +
3474 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3475 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3476 adev->psp.rl.start_addr = ucode_array_start_addr +
3477 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3478 }
3479 break;
3480 case 2:
3481 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3482
3483 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3484
3485 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3486 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3487 err = -EINVAL;
3488 goto out;
3489 }
3490
3491 if (sos_hdr_v2_0->header.header_version_minor == 1) {
3492 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3493
3494 fw_bin = sos_hdr_v2_1->psp_fw_bin;
3495
3496 if (psp_is_aux_sos_load_required(psp))
3497 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3498 else
3499 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3500
3501 } else {
3502 fw_bin = sos_hdr_v2_0->psp_fw_bin;
3503 }
3504
3505 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3506 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3507 sos_hdr_v2_0);
3508 if (err)
3509 goto out;
3510 }
3511 break;
3512 default:
3513 dev_err(adev->dev,
3514 "unsupported psp sos firmware\n");
3515 err = -EINVAL;
3516 goto out;
3517 }
3518
3519 return 0;
3520 out:
3521 amdgpu_ucode_release(&adev->psp.sos_fw);
3522
3523 return err;
3524 }
3525
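/* Record the version, size, and start address of one TA binary described
 * by a v2 TA firmware header descriptor.
 */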
3526 static int parse_ta_bin_descriptor(struct psp_context *psp,
3527 const struct psp_fw_bin_desc *desc,
3528 const struct ta_firmware_header_v2_0 *ta_hdr)
3529 {
3530 uint8_t *ucode_start_addr = NULL;
3531
3532 if (!psp || !desc || !ta_hdr)
3533 return -EINVAL;
3534
3535 ucode_start_addr = (uint8_t *)ta_hdr +
3536 le32_to_cpu(desc->offset_bytes) +
3537 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3538
3539 switch (desc->fw_type) {
3540 case TA_FW_TYPE_PSP_ASD:
3541 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3542 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3543 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3544 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3545 break;
3546 case TA_FW_TYPE_PSP_XGMI:
3547 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3548 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3549 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3550 break;
3551 case TA_FW_TYPE_PSP_RAS:
3552 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3553 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3554 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3555 break;
3556 case TA_FW_TYPE_PSP_HDCP:
3557 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3558 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3559 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3560 break;
3561 case TA_FW_TYPE_PSP_DTM:
3562 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3563 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3564 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3565 break;
3566 case TA_FW_TYPE_PSP_RAP:
3567 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3568 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3569 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3570 break;
3571 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3572 psp->securedisplay_context.context.bin_desc.fw_version =
3573 le32_to_cpu(desc->fw_version);
3574 psp->securedisplay_context.context.bin_desc.size_bytes =
3575 le32_to_cpu(desc->size_bytes);
3576 psp->securedisplay_context.context.bin_desc.start_addr =
3577 ucode_start_addr;
3578 break;
3579 default:
3580 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3581 break;
3582 }
3583
3584 return 0;
3585 }
3586
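/*
 * Version 1 TA images carry a fixed set of TAs with per-TA fields in the
 * header: XGMI and HDCP start at the ucode array base, while RAS, DTM and
 * SECUREDISPLAY are located via byte offsets relative to those payloads.
 */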
3587 static int parse_ta_v1_microcode(struct psp_context *psp)
3588 {
3589 const struct ta_firmware_header_v1_0 *ta_hdr;
3590 struct amdgpu_device *adev = psp->adev;
3591
3592 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3593
3594 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3595 return -EINVAL;
3596
3597 adev->psp.xgmi_context.context.bin_desc.fw_version =
3598 le32_to_cpu(ta_hdr->xgmi.fw_version);
3599 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3600 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3601 adev->psp.xgmi_context.context.bin_desc.start_addr =
3602 (uint8_t *)ta_hdr +
3603 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3604
3605 adev->psp.ras_context.context.bin_desc.fw_version =
3606 le32_to_cpu(ta_hdr->ras.fw_version);
3607 adev->psp.ras_context.context.bin_desc.size_bytes =
3608 le32_to_cpu(ta_hdr->ras.size_bytes);
3609 adev->psp.ras_context.context.bin_desc.start_addr =
3610 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3611 le32_to_cpu(ta_hdr->ras.offset_bytes);
3612
3613 adev->psp.hdcp_context.context.bin_desc.fw_version =
3614 le32_to_cpu(ta_hdr->hdcp.fw_version);
3615 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3616 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3617 adev->psp.hdcp_context.context.bin_desc.start_addr =
3618 (uint8_t *)ta_hdr +
3619 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3620
3621 adev->psp.dtm_context.context.bin_desc.fw_version =
3622 le32_to_cpu(ta_hdr->dtm.fw_version);
3623 adev->psp.dtm_context.context.bin_desc.size_bytes =
3624 le32_to_cpu(ta_hdr->dtm.size_bytes);
3625 adev->psp.dtm_context.context.bin_desc.start_addr =
3626 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3627 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3628
3629 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3630 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3631 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3632 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3633 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3634 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3635 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3636
3637 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3638
3639 return 0;
3640 }
3641
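/*
 * Version 2 TA images pack a variable number of TAs, each described by a
 * psp_fw_bin_desc entry; the descriptor count is bounded by
 * UCODE_MAX_PSP_PACKAGING to guard against malformed headers.
 */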
3642 static int parse_ta_v2_microcode(struct psp_context *psp)
3643 {
3644 const struct ta_firmware_header_v2_0 *ta_hdr;
3645 struct amdgpu_device *adev = psp->adev;
3646 int err = 0;
3647 int ta_index = 0;
3648
3649 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3650
3651 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3652 return -EINVAL;
3653
3654 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3655 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3656 return -EINVAL;
3657 }
3658
3659 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3660 err = parse_ta_bin_descriptor(psp,
3661 &ta_hdr->ta_fw_bin[ta_index],
3662 ta_hdr);
3663 if (err)
3664 return err;
3665 }
3666
3667 return 0;
3668 }
3669
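/*
 * Request the per-ASIC TA firmware image ("amdgpu/<chip>_ta.bin") and parse
 * it according to the header major version; the image is released again on
 * any parse failure.
 */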
3670 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3671 {
3672 const struct common_firmware_header *hdr;
3673 struct amdgpu_device *adev = psp->adev;
3674 int err;
3675
3676 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
3677 if (err)
3678 return err;
3679
3680 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3681 switch (le16_to_cpu(hdr->header_version_major)) {
3682 case 1:
3683 err = parse_ta_v1_microcode(psp);
3684 break;
3685 case 2:
3686 err = parse_ta_v2_microcode(psp);
3687 break;
3688 default:
3689 dev_err(adev->dev, "unsupported TA header version\n");
3690 err = -EINVAL;
3691 }
3692
3693 if (err)
3694 amdgpu_ucode_release(&adev->psp.ta_fw);
3695
3696 return err;
3697 }
3698
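/*
 * CAP microcode is only used under SRIOV; a missing "amdgpu/<chip>_cap.bin"
 * is tolerated and simply skipped, while any other request failure is fatal.
 */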
3699 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3700 {
3701 struct amdgpu_device *adev = psp->adev;
3702 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3703 struct amdgpu_firmware_info *info = NULL;
3704 int err = 0;
3705
3706 if (!amdgpu_sriov_vf(adev)) {
3707 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3708 return -EINVAL;
3709 }
3710
3711 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
3712 if (err) {
3713 if (err == -ENODEV) {
3714 dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3715 err = 0;
3716 goto out;
3717 }
3718 dev_err(adev->dev, "fail to initialize cap microcode\n");
goto out;
3719 }
3720
3721 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3722 info->ucode_id = AMDGPU_UCODE_ID_CAP;
3723 info->fw = adev->psp.cap_fw;
3724 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3725 adev->psp.cap_fw->data;
3726 adev->firmware.fw_size += ALIGN(
3727 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3728 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3729 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3730 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3731
3732 return 0;
3733
3734 out:
3735 amdgpu_ucode_release(&adev->psp.cap_fw);
3736 return err;
3737 }
3738
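/* PSP has no clock/power gating of its own; these IP callbacks are stubs. */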
3739 static int psp_set_clockgating_state(void *handle,
3740 enum amd_clockgating_state state)
3741 {
3742 return 0;
3743 }
3744
3745 static int psp_set_powergating_state(void *handle,
3746 enum amd_powergating_state state)
3747 {
3748 return 0;
3749 }
3750
3751 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3752 struct device_attribute *attr,
3753 char *buf)
3754 {
3755 struct drm_device *ddev = dev_get_drvdata(dev);
3756 struct amdgpu_device *adev = drm_to_adev(ddev);
3757 uint32_t fw_ver;
3758 int ret;
3759
3760 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3761 dev_info(adev->dev, "PSP block is not ready yet.\n");
3762 return -EBUSY;
3763 }
3764
3765 mutex_lock(&adev->psp.mutex);
3766 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3767 mutex_unlock(&adev->psp.mutex);
3768
3769 if (ret) {
3770 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3771 return ret;
3772 }
3773
3774 return sysfs_emit(buf, "%x\n", fw_ver);
3775 }
3776
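/*
 * Writing a firmware file name (resolved under the "amdgpu/" firmware
 * directory) stages the image in a 1MB-aligned buffer and hands it to the
 * PSP to update the USB-C PD firmware.
 */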
3777 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3778 struct device_attribute *attr,
3779 const char *buf,
3780 size_t count)
3781 {
3782 struct drm_device *ddev = dev_get_drvdata(dev);
3783 struct amdgpu_device *adev = drm_to_adev(ddev);
3784 int ret, idx;
3785 const struct firmware *usbc_pd_fw;
3786 struct amdgpu_bo *fw_buf_bo = NULL;
3787 uint64_t fw_pri_mc_addr;
3788 void *fw_pri_cpu_addr;
3789
3790 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3791 dev_err(adev->dev, "PSP block is not ready yet.\n");
3792 return -EBUSY;
3793 }
3794
3795 if (!drm_dev_enter(ddev, &idx))
3796 return -ENODEV;
3797
3798 ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
3799 if (ret)
3800 goto fail;
3801
3802 /* LFB address which is aligned to 1MB boundary per PSP request */
3803 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3804 AMDGPU_GEM_DOMAIN_VRAM |
3805 AMDGPU_GEM_DOMAIN_GTT,
3806 &fw_buf_bo, &fw_pri_mc_addr,
3807 &fw_pri_cpu_addr);
3808 if (ret)
3809 goto rel_buf;
3810
3811 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3812
3813 mutex_lock(&adev->psp.mutex);
3814 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3815 mutex_unlock(&adev->psp.mutex);
3816
3817 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3818
3819 rel_buf:
3820 amdgpu_ucode_release(&usbc_pd_fw);
3821 fail:
3822 if (ret) {
3823 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
3824 count = ret;
3825 }
3826
3827 drm_dev_exit(idx);
3828 return count;
3829 }
3830
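/*
 * Copy a firmware binary into the PSP private buffer. The destination is
 * PSP_1_MEG bytes, so callers are expected to pass a bin_size no larger
 * than that; the copy is skipped entirely if the device has been unplugged.
 */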
3831 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3832 {
3833 int idx;
3834
3835 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3836 return;
3837
3838 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3839 memcpy(psp->fw_pri_buf, start_addr, bin_size);
3840
3841 drm_dev_exit(idx);
3842 }
3843
3844 /**
3845 * DOC: usbc_pd_fw
3846 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3847 * this file will trigger the update process.
3848 */
3849 static DEVICE_ATTR(usbc_pd_fw, 0644,
3850 psp_usbc_pd_fw_sysfs_read,
3851 psp_usbc_pd_fw_sysfs_write);
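/*
 * Example (illustrative path, assuming card0 is the amdgpu device):
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 * Writing expects a bare file name that resolves below /lib/firmware/amdgpu/,
 * e.g. printf '%s' <fw_name> > /sys/class/drm/card0/device/usbc_pd_fw
 * (<fw_name> is a placeholder, not a shipped firmware file).
 */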
3852
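/* A PSP fw bin descriptor is considered valid once its size has been filled in. */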
3853 int is_psp_fw_valid(struct psp_bin_desc bin)
3854 {
3855 return bin.size_bytes;
3856 }
3857
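/*
 * Accumulate IFWI image chunks written through sysfs into a temporary
 * buffer sized to the maximum VBIOS file size; the actual flash is only
 * kicked off by a subsequent read of this node.
 */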
3858 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3859 struct bin_attribute *bin_attr,
3860 char *buffer, loff_t pos, size_t count)
3861 {
3862 struct device *dev = kobj_to_dev(kobj);
3863 struct drm_device *ddev = dev_get_drvdata(dev);
3864 struct amdgpu_device *adev = drm_to_adev(ddev);
3865
3866 adev->psp.vbflash_done = false;
3867
3868 /* Safeguard against memory drain */
3869 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
3870 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
3871 kvfree(adev->psp.vbflash_tmp_buf);
3872 adev->psp.vbflash_tmp_buf = NULL;
3873 adev->psp.vbflash_image_size = 0;
3874 return -ENOMEM;
3875 }
3876
3877 /* TODO Just allocate max for now and optimize to realloc later if needed */
3878 if (!adev->psp.vbflash_tmp_buf) {
3879 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3880 if (!adev->psp.vbflash_tmp_buf)
3881 return -ENOMEM;
3882 }
3883
3884 mutex_lock(&adev->psp.mutex);
3885 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3886 adev->psp.vbflash_image_size += count;
3887 mutex_unlock(&adev->psp.mutex);
3888
3889 dev_dbg(adev->dev, "IFWI staged for update\n");
3890
3891 return count;
3892 }
3893
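/*
 * Reading the node triggers the flash: the staged image is copied into a
 * VRAM bo and handed to the PSP via psp_update_spirom(); the staging
 * buffer is released whether or not the update succeeds.
 */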
3894 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3895 struct bin_attribute *bin_attr, char *buffer,
3896 loff_t pos, size_t count)
3897 {
3898 struct device *dev = kobj_to_dev(kobj);
3899 struct drm_device *ddev = dev_get_drvdata(dev);
3900 struct amdgpu_device *adev = drm_to_adev(ddev);
3901 struct amdgpu_bo *fw_buf_bo = NULL;
3902 uint64_t fw_pri_mc_addr;
3903 void *fw_pri_cpu_addr;
3904 int ret;
3905
3906 if (adev->psp.vbflash_image_size == 0)
3907 return -EINVAL;
3908
3909 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
3910
3911 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3912 AMDGPU_GPU_PAGE_SIZE,
3913 AMDGPU_GEM_DOMAIN_VRAM,
3914 &fw_buf_bo,
3915 &fw_pri_mc_addr,
3916 &fw_pri_cpu_addr);
3917 if (ret)
3918 goto rel_buf;
3919
3920 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3921
3922 mutex_lock(&adev->psp.mutex);
3923 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3924 mutex_unlock(&adev->psp.mutex);
3925
3926 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3927
3928 rel_buf:
3929 kvfree(adev->psp.vbflash_tmp_buf);
3930 adev->psp.vbflash_tmp_buf = NULL;
3931 adev->psp.vbflash_image_size = 0;
3932
3933 if (ret) {
3934 dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
3935 return ret;
3936 }
3937
3938 dev_dbg(adev->dev, "PSP IFWI flash process done\n");
3939 return 0;
3940 }
3941
3942 /**
3943 * DOC: psp_vbflash
3944 * Writing to this file will stage an IFWI for update. Reading from this file
3945 * will trigger the update process.
3946 */
3947 static struct bin_attribute psp_vbflash_bin_attr = {
3948 .attr = {.name = "psp_vbflash", .mode = 0660},
3949 .size = 0,
3950 .write = amdgpu_psp_vbflash_write,
3951 .read = amdgpu_psp_vbflash_read,
3952 };
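/*
 * Typical flow (illustrative sysfs paths, assuming card0 is the amdgpu
 * device; ifwi.bin is a placeholder image name):
 *   cat ifwi.bin > /sys/class/drm/card0/device/psp_vbflash     # stage image
 *   cat /sys/class/drm/card0/device/psp_vbflash                # start flash
 *   cat /sys/class/drm/card0/device/psp_vbflash_status         # poll status
 */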
3953
3954 /**
3955 * DOC: psp_vbflash_status
3956 * The status of the flash process.
3957 * 0: IFWI flash not complete.
3958 * 1: IFWI flash complete.
3959 */
3960 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3961 struct device_attribute *attr,
3962 char *buf)
3963 {
3964 struct drm_device *ddev = dev_get_drvdata(dev);
3965 struct amdgpu_device *adev = drm_to_adev(ddev);
3966 uint32_t vbflash_status;
3967
3968 vbflash_status = psp_vbflash_status(&adev->psp);
3969 if (!adev->psp.vbflash_done)
3970 vbflash_status = 0;
3971 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
3972 vbflash_status = 1;
3973
3974 return sysfs_emit(buf, "0x%x\n", vbflash_status);
3975 }
3976 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
3977
3978 static struct bin_attribute *bin_flash_attrs[] = {
3979 &psp_vbflash_bin_attr,
3980 NULL
3981 };
3982
3983 static struct attribute *flash_attrs[] = {
3984 &dev_attr_psp_vbflash_status.attr,
3985 &dev_attr_usbc_pd_fw.attr,
3986 NULL
3987 };
3988
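/*
 * The flash sysfs nodes are only exposed when the PSP reports support:
 * usbc_pd_fw requires PD firmware update support, the remaining nodes
 * require IFWI update support.
 */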
3989 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3990 {
3991 struct device *dev = kobj_to_dev(kobj);
3992 struct drm_device *ddev = dev_get_drvdata(dev);
3993 struct amdgpu_device *adev = drm_to_adev(ddev);
3994
3995 if (attr == &dev_attr_usbc_pd_fw.attr)
3996 return adev->psp.sup_pd_fw_up ? 0660 : 0;
3997
3998 return adev->psp.sup_ifwi_up ? 0440 : 0;
3999 }
4000
4001 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4002 struct bin_attribute *attr,
4003 int idx)
4004 {
4005 struct device *dev = kobj_to_dev(kobj);
4006 struct drm_device *ddev = dev_get_drvdata(dev);
4007 struct amdgpu_device *adev = drm_to_adev(ddev);
4008
4009 return adev->psp.sup_ifwi_up ? 0660 : 0;
4010 }
4011
4012 const struct attribute_group amdgpu_flash_attr_group = {
4013 .attrs = flash_attrs,
4014 .bin_attrs = bin_flash_attrs,
4015 .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4016 .is_visible = amdgpu_flash_attr_is_visible,
4017 };
4018
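/*
 * All PSP hardware generations share the same amd_ip_funcs table; only the
 * version triplet differs so that IP discovery can bind the matching block.
 */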
4019 const struct amd_ip_funcs psp_ip_funcs = {
4020 .name = "psp",
4021 .early_init = psp_early_init,
4022 .late_init = NULL,
4023 .sw_init = psp_sw_init,
4024 .sw_fini = psp_sw_fini,
4025 .hw_init = psp_hw_init,
4026 .hw_fini = psp_hw_fini,
4027 .suspend = psp_suspend,
4028 .resume = psp_resume,
4029 .is_idle = NULL,
4030 .check_soft_reset = NULL,
4031 .wait_for_idle = NULL,
4032 .soft_reset = NULL,
4033 .set_clockgating_state = psp_set_clockgating_state,
4034 .set_powergating_state = psp_set_powergating_state,
4035 };
4036
4037 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4038 .type = AMD_IP_BLOCK_TYPE_PSP,
4039 .major = 3,
4040 .minor = 1,
4041 .rev = 0,
4042 .funcs = &psp_ip_funcs,
4043 };
4044
4045 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4046 .type = AMD_IP_BLOCK_TYPE_PSP,
4047 .major = 10,
4048 .minor = 0,
4049 .rev = 0,
4050 .funcs = &psp_ip_funcs,
4051 };
4052
4053 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4054 .type = AMD_IP_BLOCK_TYPE_PSP,
4055 .major = 11,
4056 .minor = 0,
4057 .rev = 0,
4058 .funcs = &psp_ip_funcs,
4059 };
4060
4061 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4062 .type = AMD_IP_BLOCK_TYPE_PSP,
4063 .major = 11,
4064 .minor = 0,
4065 .rev = 8,
4066 .funcs = &psp_ip_funcs,
4067 };
4068
4069 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4070 .type = AMD_IP_BLOCK_TYPE_PSP,
4071 .major = 12,
4072 .minor = 0,
4073 .rev = 0,
4074 .funcs = &psp_ip_funcs,
4075 };
4076
4077 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4078 .type = AMD_IP_BLOCK_TYPE_PSP,
4079 .major = 13,
4080 .minor = 0,
4081 .rev = 0,
4082 .funcs = &psp_ip_funcs,
4083 };
4084
4085 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4086 .type = AMD_IP_BLOCK_TYPE_PSP,
4087 .major = 13,
4088 .minor = 0,
4089 .rev = 4,
4090 .funcs = &psp_ip_funcs,
4091 };
4092
4093 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4094 .type = AMD_IP_BLOCK_TYPE_PSP,
4095 .major = 14,
4096 .minor = 0,
4097 .rev = 0,
4098 .funcs = &psp_ip_funcs,
4099 };
4100