/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

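/**
 * amdgpu_mes_doorbell_process_slice - size of the per-process doorbell slice
 * @adev: amdgpu device
 *
 * Returns the number of doorbell bytes reserved for each process: one
 * 8-byte doorbell per queue for the maximum number of queues a process
 * may own, rounded up to a whole page.
 */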
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

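/**
 * amdgpu_mes_init - initialize the MES (Micro Engine Scheduler) state
 * @adev: amdgpu device
 *
 * Sets up the IDRs, locks, HQD masks and per-pipe writeback slots used to
 * talk to the MES firmware, then initializes the doorbell bitmap and the
 * optional event log buffer.
 *
 * Returns 0 on success, negative error code on failure.
 */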
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= adev->gfx.mec.num_pipe_per_mec)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
					 &adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) query_status_fence_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
					      adev->mes.query_status_fence_offs[i]);
	}

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

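/**
 * amdgpu_mes_fini - tear down the MES state created by amdgpu_mes_init()
 * @adev: amdgpu device
 *
 * Frees the event log buffer, the per-pipe writeback slots, the doorbell
 * bitmap and all IDR/IDA bookkeeping.
 */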
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i;

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
					      adev->mes.query_status_fence_offs[i]);
	}

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

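/**
 * amdgpu_mes_create_process - create and register a MES process
 * @adev: amdgpu device
 * @pasid: PASID identifying the process
 * @vm: VM whose page directory the process will use
 *
 * Allocates the process context BO in GTT and registers the process in
 * the pasid IDR under the MES lock.
 *
 * Returns 0 on success, negative error code on failure.
 */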
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

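/**
 * amdgpu_mes_destroy_process - destroy a MES process and all its gangs
 * @adev: amdgpu device
 * @pasid: PASID of the process to destroy
 *
 * Removes every hardware queue owned by the process from the MES firmware
 * and drops all IDR entries while holding the MES lock, then frees the
 * queue, gang and process resources outside of the lock.
 */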
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

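/**
 * amdgpu_mes_add_gang - create a gang and attach it to a MES process
 * @adev: amdgpu device
 * @pasid: PASID of the owning process
 * @gprops: requested gang properties (priority, quantum, ...)
 * @gang_id: returned gang id allocated from the gang IDR
 *
 * Returns 0 on success, negative error code on failure.
 */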
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

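/**
 * amdgpu_mes_remove_gang - remove an empty gang
 * @adev: amdgpu device
 * @gang_id: id of the gang to remove
 *
 * The gang must have no queues left; otherwise -EBUSY is returned.
 *
 * Returns 0 on success, negative error code on failure.
 */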
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

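/**
 * amdgpu_mes_suspend - ask the MES firmware to suspend all gangs
 * @adev: amdgpu device
 *
 * No-op on firmware that does not support suspending/resuming all gangs;
 * see amdgpu_mes_resume() for the matching resume path.
 *
 * Returns 0 on success, negative error code on failure.
 */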
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to suspend all gangs");

	return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to resume all gangs");

	return r;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

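/**
 * amdgpu_mes_add_hw_queue - create a hardware queue and hand it to the MES
 * @adev: amdgpu device
 * @gang_id: gang the queue belongs to
 * @qprops: queue properties (type, ring, wptr/rptr addresses, ...)
 * @queue_id: returned queue id allocated from the queue IDR
 *
 * Allocates and initializes the MQD, reserves a kernel doorbell, then asks
 * the MES firmware to add the queue to its scheduling list.
 *
 * Returns 0 on success, negative error code on failure.
 */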
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

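/**
 * amdgpu_mes_remove_hw_queue - remove a hardware queue from the MES
 * @adev: amdgpu device
 * @queue_id: id of the queue to remove
 *
 * Drops the queue from the queue IDR, asks the firmware to remove it,
 * releases its doorbell and frees the MQD.
 *
 * Returns 0 on success, negative error code on failure.
 */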
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_reset_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* look up the mes queue in the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
			  queue_id);

	amdgpu_mes_unlock(&adev->mes);

	return 0;
}

int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
				   int me_id, int pipe_id, int queue_id, int vmid)
{
	struct mes_reset_queue_input queue_input;
	int r;

	queue_input.queue_type = queue_type;
	queue_input.use_mmio = true;
	queue_input.me_id = me_id;
	queue_input.pipe_id = pipe_id;
	queue_input.queue_id = queue_id;
	queue_input.vmid = vmid;
	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
			  queue_id);
	return r;
}

int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid,
				  bool use_mmio)
{
	struct mes_reset_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.me_id = ring->me;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;
	queue_input.use_mmio = use_mmio;

	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset legacy queue\n");

	return r;
}

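/**
 * amdgpu_mes_rreg - read a register through the MES firmware
 * @adev: amdgpu device
 * @reg: register offset to read
 *
 * The firmware writes the register value into a writeback slot that is
 * then read back on the CPU; see amdgpu_mes_wreg() for the write path.
 *
 * Returns the register value, or 0 on failure.
 */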
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;
	uint32_t addr_offset = 0;
	uint64_t read_val_gpu_addr;
	uint32_t *read_val_ptr;

	if (amdgpu_device_wb_get(adev, &addr_offset)) {
		DRM_ERROR("critical bug! too many mes readers\n");
		goto error;
	}
	read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
	read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(read_val_ptr);

error:
	if (addr_offset)
		amdgpu_device_wb_free(adev, addr_offset);
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while(0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

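/**
 * amdgpu_mes_add_ring - create an amdgpu ring backed by a MES hardware queue
 * @adev: amdgpu device
 * @gang_id: gang the new queue should belong to
 * @queue_type: AMDGPU_RING_TYPE_GFX, _COMPUTE or _SDMA
 * @idx: ring index within the MES context meta data
 * @ctx_data: MES context meta data the ring state lives in
 * @out: returned ring
 *
 * Returns 0 on success, negative error code on failure.
 */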
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r) {
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_memory;
	}

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	amdgpu_ring_fini(ring);
clean_up_memory:
	kfree(ring);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

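/**
 * amdgpu_mes_ctx_map_meta_data - map the context meta data BO into a VM
 * @adev: amdgpu device
 * @vm: VM to map the meta data into
 * @ctx_data: context meta data to map
 *
 * Locks the BO and the VM page directory with drm_exec, creates the bo_va
 * mapping and waits for the page table updates to complete.
 *
 * Returns 0 on success, negative error code on failure.
 */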
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

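/**
 * amdgpu_mes_self_test - smoke test for the MES queue path
 * @adev: amdgpu device
 *
 * Creates a test process, VM and context meta data, adds a gang with one
 * gfx, one compute and one SDMA queue, runs ring and IB tests on them,
 * then tears everything down again.
 *
 * Returns 0; test failures are reported through the kernel log.
 */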
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware does not support mapping SDMA queues. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
		    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

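/**
 * amdgpu_mes_init_microcode - load the MES firmware for one pipe
 * @adev: amdgpu device
 * @pipe: MES pipe to load firmware for
 *
 * Picks the firmware file name based on the GC IP version and unified-MES
 * support, requests it (falling back to the plain _mes.bin name on parts
 * where that applies), and records the ucode start addresses and version
 * for later PSP loading.
 *
 * Returns 0 on success, negative error code on failure.
 */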
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	u32 *ucode_ptr;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
		   amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
	ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
			    sizeof(union amdgpu_firmware_header));
	adev->mes.fw_version[pipe] =
		le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
	bool is_supported = false;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
	    mes_rev >= 0x63)
		is_supported = true;

	return is_supported;
}

/* FIXME: node_id will be used to identify the correct MES instance in the future */
int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	op_input.op = MES_MISC_OP_CHANGE_CONFIG;
	op_input.change_config.option.limit_single_process = enable ? 1 : 0;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes change config is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to change_config.\n");

error:
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, adev->mes.event_log_size, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}