/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

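/**
 * amdgpu_mes_doorbell_process_slice - per-process doorbell slice size
 * @adev: amdgpu device
 *
 * One 8-byte doorbell per queue, up to the per-process queue limit,
 * rounded up to a whole page.
 */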
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
        return roundup(AMDGPU_ONE_DOORBELL_SIZE *
                       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
                       PAGE_SIZE);
}

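/*
 * Publish one aggregated doorbell per priority level and mark those slots
 * as used in the bitmap that backs dynamic kernel doorbell allocation.
 */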
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
        int i;
        struct amdgpu_mes *mes = &adev->mes;

        /* Bitmap for dynamic allocation of kernel doorbells */
        mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
        if (!mes->doorbell_bitmap) {
                dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n");
                return -ENOMEM;
        }

        mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
        for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
                adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
                set_bit(i, mes->doorbell_bitmap);
        }

        return 0;
}

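/* Allocate and zero the VRAM buffer backing the MES firmware event log. */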
static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_mes_log_enable)
                return 0;

        r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->mes.event_log_gpu_obj,
                                    &adev->mes.event_log_gpu_addr,
                                    &adev->mes.event_log_cpu_addr);
        if (r) {
                dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
                return r;
        }

        memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

        return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
        bitmap_free(adev->mes.doorbell_bitmap);
}

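/**
 * amdgpu_mes_init - one-time MES software initialization
 * @adev: amdgpu device
 *
 * Sets up the ID allocators and locks, programs the HQD masks that tell the
 * MES firmware which GFX/compute/SDMA hardware queues it may schedule,
 * allocates per-pipe write-back slots, and creates the doorbell, event-log
 * and hung-queue buffers.
 *
 * Returns 0 on success or a negative error code.
 */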
int amdgpu_mes_init(struct amdgpu_device *adev)
{
        int i, r, num_pipes;

        adev->mes.adev = adev;

        idr_init(&adev->mes.pasid_idr);
        idr_init(&adev->mes.gang_id_idr);
        idr_init(&adev->mes.queue_id_idr);
        ida_init(&adev->mes.doorbell_ida);
        spin_lock_init(&adev->mes.queue_id_lock);
        mutex_init(&adev->mes.mutex_hidden);

        for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
                spin_lock_init(&adev->mes.ring_lock[i]);

        adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
        adev->mes.vmid_mask_mmhub = 0xffffff00;
        adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;

        num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
        if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
                dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
                         num_pipes, AMDGPU_MES_MAX_GFX_PIPES);

        for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
                if (i >= num_pipes)
                        break;
                if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
                    IP_VERSION(12, 0, 0))
                        /*
                         * GFX V12 has only one GFX pipe, but 8 queues in it.
                         * GFX pipe 0 queue 0 is being used by Kernel queue.
                         * Set GFX pipe 0 queues 1-7 for MES scheduling
                         * mask = 1111 1110b
                         */
                        adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
                else
                        /*
                         * GFX pipe 0 queue 0 is being used by Kernel queue.
                         * Set GFX pipe 0 queue 1 for MES scheduling
                         * mask = 10b
                         */
                        adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
        }

        num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
        if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
                dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
                         num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);

        for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
                if (i >= num_pipes)
                        break;
                adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
        }

        num_pipes = adev->sdma.num_instances;
        if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
                dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
                         num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);

        for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
                if (i >= num_pipes)
                        break;
                adev->mes.sdma_hqd_mask[i] = 0xfc;
        }

        for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
                r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
                if (r) {
                        dev_err(adev->dev,
                                "(%d) ring trail_fence_offs wb alloc failed\n",
                                r);
                        goto error;
                }
                adev->mes.sch_ctx_gpu_addr[i] =
                        adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
                adev->mes.sch_ctx_ptr[i] =
                        (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

                r = amdgpu_device_wb_get(adev,
                                         &adev->mes.query_status_fence_offs[i]);
                if (r) {
                        dev_err(adev->dev,
                                "(%d) query_status_fence_offs wb alloc failed\n",
                                r);
                        goto error;
                }
                adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
                        (adev->mes.query_status_fence_offs[i] * 4);
                adev->mes.query_status_fence_ptr[i] =
                        (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
        }

        r = amdgpu_mes_doorbell_init(adev);
        if (r)
                goto error;

        r = amdgpu_mes_event_log_init(adev);
        if (r)
                goto error_doorbell;

        if (adev->mes.hung_queue_db_array_size) {
                r = amdgpu_bo_create_kernel(adev,
                                            adev->mes.hung_queue_db_array_size * sizeof(u32),
                                            PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->mes.hung_queue_db_array_gpu_obj,
                                            &adev->mes.hung_queue_db_array_gpu_addr,
                                            &adev->mes.hung_queue_db_array_cpu_addr);
                if (r) {
                        dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)", r);
                        goto error_doorbell;
                }
        }

        return 0;

error_doorbell:
        amdgpu_mes_doorbell_free(adev);
error:
        for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
                if (adev->mes.sch_ctx_ptr[i])
                        amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
                if (adev->mes.query_status_fence_ptr[i])
                        amdgpu_device_wb_free(adev,
                                              adev->mes.query_status_fence_offs[i]);
        }

        idr_destroy(&adev->mes.pasid_idr);
        idr_destroy(&adev->mes.gang_id_idr);
        idr_destroy(&adev->mes.queue_id_idr);
        ida_destroy(&adev->mes.doorbell_ida);
        mutex_destroy(&adev->mes.mutex_hidden);
        return r;
}

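/**
 * amdgpu_mes_fini - tear down everything amdgpu_mes_init() created
 * @adev: amdgpu device
 */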
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
        int i;

        amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj,
                              &adev->mes.hung_queue_db_array_gpu_addr,
                              &adev->mes.hung_queue_db_array_cpu_addr);

        amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
                              &adev->mes.event_log_gpu_addr,
                              &adev->mes.event_log_cpu_addr);

        for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
                if (adev->mes.sch_ctx_ptr[i])
                        amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
                if (adev->mes.query_status_fence_ptr[i])
                        amdgpu_device_wb_free(adev,
                                              adev->mes.query_status_fence_offs[i]);
        }

        amdgpu_mes_doorbell_free(adev);

        idr_destroy(&adev->mes.pasid_idr);
        idr_destroy(&adev->mes.gang_id_idr);
        idr_destroy(&adev->mes.queue_id_idr);
        ida_destroy(&adev->mes.doorbell_ida);
        mutex_destroy(&adev->mes.mutex_hidden);
}

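/* Ask the MES firmware to suspend all gangs it is scheduling. */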
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
        struct mes_suspend_gang_input input;
        int r;

        if (!amdgpu_mes_suspend_resume_all_supported(adev))
                return 0;

        memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
        input.suspend_all_gangs = 1;

        /*
         * Avoid taking any other locks while holding the MES lock to
         * prevent circular lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to suspend all gangs");

        return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
        struct mes_resume_gang_input input;
        int r;

        if (!amdgpu_mes_suspend_resume_all_supported(adev))
                return 0;

        memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
        input.resume_all_gangs = 1;

        /*
         * Avoid taking any other locks while holding the MES lock to
         * prevent circular lock dependencies.
         */
        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->resume_gang(&adev->mes, &input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to resume all gangs");

        return r;
}

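/*
 * Hand a kernel ("legacy") ring over to MES so the firmware maps its
 * hardware queue from the ring's MQD.
 */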
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
                                struct amdgpu_ring *ring)
{
        struct mes_map_legacy_queue_input queue_input;
        int r;

        memset(&queue_input, 0, sizeof(queue_input));

        queue_input.queue_type = ring->funcs->type;
        queue_input.doorbell_offset = ring->doorbell_index;
        queue_input.pipe_id = ring->pipe;
        queue_input.queue_id = ring->queue;
        queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
        queue_input.wptr_addr = ring->wptr_gpu_addr;

        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to map legacy queue\n");

        return r;
}

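/*
 * Unmap a kernel ring's hardware queue through MES; the firmware writes
 * 'seq' to 'gpu_addr' as a trailing fence once the unmap completes.
 */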
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  enum amdgpu_unmap_queues_action action,
                                  u64 gpu_addr, u64 seq)
{
        struct mes_unmap_legacy_queue_input queue_input;
        int r;

        queue_input.action = action;
        queue_input.queue_type = ring->funcs->type;
        queue_input.doorbell_offset = ring->doorbell_index;
        queue_input.pipe_id = ring->pipe;
        queue_input.queue_id = ring->queue;
        queue_input.trail_fence_addr = gpu_addr;
        queue_input.trail_fence_data = seq;

        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to unmap legacy queue\n");

        return r;
}

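/*
 * Reset a hung kernel ring's hardware queue, either through the MES
 * firmware or directly via MMIO when use_mmio is set.
 */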
int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  unsigned int vmid,
                                  bool use_mmio)
{
        struct mes_reset_queue_input queue_input;
        int r;

        memset(&queue_input, 0, sizeof(queue_input));

        queue_input.queue_type = ring->funcs->type;
        queue_input.doorbell_offset = ring->doorbell_index;
        queue_input.me_id = ring->me;
        queue_input.pipe_id = ring->pipe;
        queue_input.queue_id = ring->queue;
        queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
        queue_input.wptr_addr = ring->wptr_gpu_addr;
        queue_input.vmid = vmid;
        queue_input.use_mmio = use_mmio;
        queue_input.is_kq = true;
        if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
                queue_input.legacy_gfx = true;

        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to reset legacy queue\n");

        return r;
}

int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev)
{
        return adev->mes.hung_queue_db_array_size;
}

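/*
 * Ask MES to detect (and, unless detect_only is set, reset) hung queues of
 * the given type. The doorbell offsets of the hung queues are copied to
 * hung_db_array and their count is returned in hung_db_num.
 */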
int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
                                            int queue_type,
                                            bool detect_only,
                                            unsigned int *hung_db_num,
                                            u32 *hung_db_array)
{
        struct mes_detect_and_reset_queue_input input;
        u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr;
        int r, i;

        if (!hung_db_num || !hung_db_array)
                return -EINVAL;

        if ((queue_type != AMDGPU_RING_TYPE_GFX) &&
            (queue_type != AMDGPU_RING_TYPE_COMPUTE) &&
            (queue_type != AMDGPU_RING_TYPE_SDMA))
                return -EINVAL;

        /* Clear the doorbell array before detection */
        memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET,
               adev->mes.hung_queue_db_array_size * sizeof(u32));
        input.queue_type = queue_type;
        input.detect_only = detect_only;

        r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes,
                                                          &input);
        if (r) {
                dev_err(adev->dev, "failed to detect and reset hung queues\n");
        } else {
                *hung_db_num = 0;
                for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) {
                        if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
                                hung_db_array[i] = db_array[i];
                                *hung_db_num += 1;
                        }
                }

                /*
                 * TODO: return HQD info for MES-scheduled user compute queue
                 * reset cases (stored in hung_db_array from the HQD info
                 * offset to the end of the array).
                 */
        }

        return r;
}

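/*
 * Read a register through the MES firmware: MES writes the value into a
 * write-back slot, which is then read back on the CPU.
 */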
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        struct mes_misc_op_input op_input;
        uint32_t val = 0;
        uint32_t addr_offset = 0;
        uint64_t read_val_gpu_addr;
        uint32_t *read_val_ptr;
        int r;

        if (amdgpu_device_wb_get(adev, &addr_offset)) {
                dev_err(adev->dev, "critical bug! too many mes readers\n");
                goto error;
        }
        read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
        read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
        op_input.op = MES_MISC_OP_READ_REG;
        op_input.read_reg.reg_offset = reg;
        op_input.read_reg.buffer_addr = read_val_gpu_addr;

        if (!adev->mes.funcs->misc_op) {
                dev_err(adev->dev, "mes rreg is not supported!\n");
                goto error;
        }

        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
        else
                val = *read_val_ptr;

error:
        if (addr_offset)
                amdgpu_device_wb_free(adev, addr_offset);
        return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
                    uint32_t reg, uint32_t val)
{
        struct mes_misc_op_input op_input;
        int r;

        op_input.op = MES_MISC_OP_WRITE_REG;
        op_input.write_reg.reg_offset = reg;
        op_input.write_reg.reg_value = val;

        if (!adev->mes.funcs->misc_op) {
                dev_err(adev->dev, "mes wreg is not supported!\n");
                r = -EINVAL;
                goto error;
        }

        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);

error:
        return r;
}

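/*
 * Have MES perform a write-and-wait register operation: reg0 is written
 * and reg1 is then polled against ref under mask.
 */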
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
                                  uint32_t reg0, uint32_t reg1,
                                  uint32_t ref, uint32_t mask)
{
        struct mes_misc_op_input op_input;
        int r;

        op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
        op_input.wrm_reg.reg0 = reg0;
        op_input.wrm_reg.reg1 = reg1;
        op_input.wrm_reg.ref = ref;
        op_input.wrm_reg.mask = mask;

        if (!adev->mes.funcs->misc_op) {
                dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
                r = -EINVAL;
                goto error;
        }

        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to reg_write_reg_wait\n");

error:
        return r;
}

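/*
 * Configure the per-process shader debugger state (SPI debug control and
 * TCP watch registers) through MES. trap_en is only honored on MES API
 * version 14 and newer.
 */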
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
                                   uint64_t process_context_addr,
                                   uint32_t spi_gdbg_per_vmid_cntl,
                                   const uint32_t *tcp_watch_cntl,
                                   uint32_t flags,
                                   bool trap_en)
{
        struct mes_misc_op_input op_input = {0};
        int r;

        if (!adev->mes.funcs->misc_op) {
                dev_err(adev->dev,
                        "mes set shader debugger is not supported!\n");
                return -EINVAL;
        }

        op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
        op_input.set_shader_debugger.process_context_addr = process_context_addr;
        op_input.set_shader_debugger.flags.u32all = flags;

        /* use amdgpu_mes_flush_shader_debugger instead */
        if (op_input.set_shader_debugger.flags.process_ctx_flush)
                return -EINVAL;

        op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
        memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
               sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

        if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
             AMDGPU_MES_API_VERSION_SHIFT) >= 14)
                op_input.set_shader_debugger.trap_en = trap_en;

        amdgpu_mes_lock(&adev->mes);

        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        if (r)
                dev_err(adev->dev, "failed to set_shader_debugger\n");

        amdgpu_mes_unlock(&adev->mes);

        return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
                                     uint64_t process_context_addr)
{
        struct mes_misc_op_input op_input = {0};
        int r;

        if (!adev->mes.funcs->misc_op) {
                dev_err(adev->dev,
                        "mes flush shader debugger is not supported!\n");
                return -EINVAL;
        }

        op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
        op_input.set_shader_debugger.process_context_addr = process_context_addr;
        op_input.set_shader_debugger.flags.process_ctx_flush = true;

        amdgpu_mes_lock(&adev->mes);

        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        if (r)
                dev_err(adev->dev, "failed to flush_shader_debugger\n");

        amdgpu_mes_unlock(&adev->mes);

        return r;
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
                                                  enum amdgpu_mes_priority_level prio)
{
        return adev->mes.aggregated_doorbells[prio];
}

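/*
 * Request the MES firmware image for the given pipe. The firmware name
 * depends on the GC IP version and on whether unified MES is enabled;
 * GFX11 parts retry with the legacy <prefix>_mes.bin name for the
 * scheduler pipe if the _2 variant is missing.
 */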
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
        const struct mes_firmware_header_v1_0 *mes_hdr;
        struct amdgpu_firmware_info *info;
        char ucode_prefix[30];
        char fw_name[50];
        bool need_retry = false;
        u32 *ucode_ptr;
        int r;

        amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
                                       sizeof(ucode_prefix));
        if (adev->enable_uni_mes) {
                snprintf(fw_name, sizeof(fw_name),
                         "amdgpu/%s_uni_mes.bin", ucode_prefix);
        } else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
                   amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
                         ucode_prefix,
                         pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
                need_retry = true;
        } else {
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
                         ucode_prefix,
                         pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
        }

        r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
                                 "%s", fw_name);
        if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
                dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
                r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
                                         AMDGPU_UCODE_REQUIRED,
                                         "amdgpu/%s_mes.bin", ucode_prefix);
        }

        if (r)
                goto out;

        mes_hdr = (const struct mes_firmware_header_v1_0 *)
                adev->mes.fw[pipe]->data;
        adev->mes.uc_start_addr[pipe] =
                le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
                ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
        adev->mes.data_start_addr[pipe] =
                le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
                ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
        ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
                            sizeof(union amdgpu_firmware_header));
        adev->mes.fw_version[pipe] =
                le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                int ucode, ucode_data;

                if (pipe == AMDGPU_MES_SCHED_PIPE) {
                        ucode = AMDGPU_UCODE_ID_CP_MES;
                        ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
                } else {
                        ucode = AMDGPU_UCODE_ID_CP_MES1;
                        ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
                }

                info = &adev->firmware.ucode[ucode];
                info->ucode_id = ucode;
                info->fw = adev->mes.fw[pipe];
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
                              PAGE_SIZE);

                info = &adev->firmware.ucode[ucode_data];
                info->ucode_id = ucode_data;
                info->fw = adev->mes.fw[pipe];
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
                              PAGE_SIZE);
        }

        return 0;
out:
        amdgpu_ucode_release(&adev->mes.fw[pipe]);
        return r;
}

bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
        uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;

        return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
                 amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
                 mes_rev >= 0x63) ||
                amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0));
}

/* FIXME: node_id will be used to select the correct MES instance in the future */
static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
                                            uint32_t node_id, bool enable)
{
        struct mes_misc_op_input op_input = {0};
        int r;

        op_input.op = MES_MISC_OP_CHANGE_CONFIG;
        op_input.change_config.option.limit_single_process = enable ? 1 : 0;

        if (!adev->mes.funcs->misc_op) {
                dev_err(adev->dev, "mes change config is not supported!\n");
                r = -EINVAL;
                goto error;
        }

        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
        amdgpu_mes_unlock(&adev->mes);
        if (r)
                dev_err(adev->dev, "failed to change_config.\n");

error:
        return r;
}

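/*
 * Push the current per-partition enforce-isolation settings to MES. Only
 * relevant when MES is enabled and the cleaner shader is in use.
 */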
int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
{
        int i, r = 0;

        if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
                mutex_lock(&adev->enforce_isolation_mutex);
                for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
                        if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
                                r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
                        else
                                r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
                }
                mutex_unlock(&adev->enforce_isolation_mutex);
        }
        return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
        struct amdgpu_device *adev = m->private;
        uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

        seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
                     mem, adev->mes.event_log_size, false);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

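/* Expose the MES event log in debugfs when MES and its log are enabled. */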
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;

        if (adev->enable_mes && amdgpu_mes_log_enable)
                debugfs_create_file("amdgpu_mes_event_log", 0444, root,
                                    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}