xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c (revision 5e93d0e335e992066cf394c00808ee192da4ecf5)
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

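/*
 * Each MES process gets its own page-aligned slice of the doorbell BAR:
 * one 8-byte doorbell per queue, up to 1024 queues per process, rounded
 * up to a whole page (8 * 1024 = 8192 bytes, i.e. two 4 KiB pages).
 */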
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

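/*
 * Allocate one kernel doorbell from the MES doorbell bitmap.  SDMA
 * queues start the search at the first SDMA engine's doorbell offset;
 * all other queue types search from the start of the slice.  The bit
 * index is converted to a dword offset with "found * 2" since each
 * doorbell is two dwords (8 bytes) wide.
 */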
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					   uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

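/*
 * Set up the kernel MES doorbell bitmap: one page worth of doorbells
 * (PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE entries), with the first
 * AMDGPU_MES_PRIORITY_NUM_LEVELS slots pre-reserved as the per-priority
 * aggregated doorbells.
 */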
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)\n", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

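/**
 * amdgpu_mes_init - initialize the MES manager state
 * @adev: amdgpu device pointer
 *
 * Sets up the idr/ida allocators and locks, programs the default VMID
 * and HQD masks for the compute, gfx and sdma pipes, and allocates
 * per-pipe writeback slots for the scheduler context and query-status
 * fences before initializing the doorbells and the optional event log.
 *
 * Returns 0 on success, negative error code on failure.
 */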
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		if (i >= (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec))
			break;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (i >= adev->sdma.num_instances)
			break;
		adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
				 &adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
			      "(%d) query_status_fence_offs wb alloc failed\n",
			      r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i;

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

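/**
 * amdgpu_mes_create_process - create an MES process for a VM
 * @adev: amdgpu device pointer
 * @pasid: PASID identifying the process
 * @vm: VM the process will run in
 *
 * Allocates and zeroes the process context BO in GTT, then registers
 * the process in the pasid idr under the MES lock.  The BO is created
 * before the lock is taken so that no other locks are acquired while
 * holding it.
 *
 * Returns 0 on success, negative error code on failure.
 */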
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

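/**
 * amdgpu_mes_destroy_process - tear down an MES process
 * @adev: amdgpu device pointer
 * @pasid: PASID of the process to destroy
 *
 * Removes every hardware queue of every gang from the MES scheduler
 * and drops all idr entries under the MES lock, then frees the MQDs,
 * queue/gang structures and context BOs after the lock is released.
 */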
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

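/**
 * amdgpu_mes_add_gang - create a gang within an MES process
 * @adev: amdgpu device pointer
 * @pasid: PASID of the owning process
 * @gprops: requested gang properties (priorities and quantum)
 * @gang_id: returned gang id allocated from the gang idr
 *
 * Allocates and zeroes the gang context BO in GTT, then links the gang
 * to its process under the MES lock.  A gang_quantum of 0 selects the
 * device default.
 *
 * Returns 0 on success, negative error code on failure.
 */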
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

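/*
 * amdgpu_mes_suspend()/amdgpu_mes_resume() ask the MES firmware to
 * suspend or resume all gangs in a single call.  Both are no-ops on
 * firmware that doesn't implement suspend/resume-all (see
 * amdgpu_mes_suspend_resume_all_supported()).
 */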
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to suspend all gangs\n");

	return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to resume all gangs\n");

	return r;
}

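/*
 * Note: on success the MQD BO is left reserved; it is unreserved at
 * the end of amdgpu_mes_queue_init_mqd() once the MQD contents have
 * been written.
 */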
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)\n", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

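/**
 * amdgpu_mes_add_hw_queue - add a hardware queue to the MES scheduler
 * @adev: amdgpu device pointer
 * @gang_id: gang the queue belongs to
 * @qprops: queue properties; the allocated doorbell offset is written
 *          back into @qprops->doorbell_off
 * @queue_id: returned queue id allocated from the queue idr
 *
 * Allocates the MQD BO, a queue id and a kernel doorbell, initializes
 * the MQD and then issues the ADD_QUEUE command to the MES firmware
 * with the process/gang context addresses and scheduling parameters.
 *
 * Returns 0 on success, negative error code on failure.
 */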
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

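/**
 * amdgpu_mes_remove_hw_queue - remove a hardware queue from MES
 * @adev: amdgpu device pointer
 * @queue_id: id of the queue to remove
 *
 * Drops the queue from the idr, issues the REMOVE_QUEUE command to the
 * MES firmware, and frees the doorbell, MQD and queue structure.  A
 * failure of the firmware call is only logged; teardown continues.
 */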
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_reset_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* look up the queue in the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
			  queue_id);

	amdgpu_mes_unlock(&adev->mes);

	return 0;
}

int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
				   int me_id, int pipe_id, int queue_id, int vmid)
{
	struct mes_reset_queue_input queue_input;
	int r;

	queue_input.queue_type = queue_type;
	queue_input.use_mmio = true;
	queue_input.me_id = me_id;
	queue_input.pipe_id = pipe_id;
	queue_input.queue_id = queue_id;
	queue_input.vmid = vmid;
	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
			  queue_id);
	return r;
}

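/*
 * Legacy (kernel ring) queues are not created through
 * amdgpu_mes_add_hw_queue(); the map/unmap/reset helpers below hand an
 * already initialized ring's MQD over to the MES firmware instead.
 */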
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid,
				  bool use_mmio)
{
	struct mes_reset_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.me_id = ring->me;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;
	queue_input.use_mmio = use_mmio;

	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset legacy queue\n");

	return r;
}

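/*
 * Register access routed through the MES firmware, for cases where the
 * register cannot be accessed directly by the driver (e.g. under
 * SR-IOV).  The read path bounces the value through a writeback slot.
 * Illustrative use (hypothetical reg_offset/some_bit values):
 *
 *	val = amdgpu_mes_rreg(adev, reg_offset);
 *	amdgpu_mes_wreg(adev, reg_offset, val | some_bit);
 *
 * Note that amdgpu_mes_rreg() returns 0 both for a register that reads
 * back as 0 and on failure.
 */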
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;
	uint32_t addr_offset = 0;
	uint64_t read_val_gpu_addr;
	uint32_t *read_val_ptr;

	if (amdgpu_device_wb_get(adev, &addr_offset)) {
		DRM_ERROR("critical bug! too many mes readers\n");
		goto error;
	}
	read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
	read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(read_val_ptr);

error:
	if (addr_offset)
		amdgpu_device_wb_free(adev, addr_offset);
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

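/*
 * amdgpu_mes_set_shader_debugger() programs the per-process shader
 * debug state (SPI per-VMID control, TCP watch registers, trap enable)
 * through a MES misc op.  trap_en is only honored on MES API version
 * 14 or newer; a process_ctx_flush request must go through
 * amdgpu_mes_flush_shader_debugger() instead.
 */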
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush_shader_debugger\n");
	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

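/*
 * Helper for amdgpu_mes_ctx_get_offs(): returns the byte offset of the
 * requested slot (fence slots, ring buffer, IB, or padding) for this
 * ring's engine array inside struct amdgpu_mes_ctx_meta_data.
 */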
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

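/**
 * amdgpu_mes_add_ring - create a software ring backed by an MES queue
 * @adev: amdgpu device pointer
 * @gang_id: gang the new queue joins
 * @queue_type: AMDGPU_RING_TYPE_GFX, _COMPUTE or _SDMA
 * @idx: index of the ring within the ctx meta data
 * @ctx_data: mapped MES ctx meta data that backs the ring
 * @out: returned ring
 *
 * Borrows the funcs (and me/pipe) of the first hardware ring of the
 * same type, initializes the ring on top of the ctx meta data BO and
 * then adds it as an MES hardware queue.
 *
 * Returns 0 on success, negative error code on failure.
 */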
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r) {
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_memory;
	}

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	amdgpu_ring_fini(ring);
clean_up_memory:
	kfree(ring);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

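/*
 * Map the ctx meta data BO into the process VM at
 * ctx_data->meta_data_gpu_addr and wait for the page table updates to
 * land before returning, so that queues backed by the meta data can be
 * used immediately.
 */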
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update, GFP_KERNEL);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update, GFP_KERNEL);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}

		DRM_INFO("ring %s ib test passed\n", ring->name);
	}

	return 0;
}

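/*
 * MES self test: creates a temporary VM and process, one gang per
 * queue type with a single queue each (SDMA is skipped on GFX 10.3.x,
 * whose firmware cannot map SDMA queues), runs ring and IB tests on
 * every queue, then tears everything down again.  Always returns 0;
 * individual failures are reported through the kernel log.
 */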
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware doesn't support mapping SDMA queues. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

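/*
 * Fetch the MES firmware for the given pipe.  Depending on the GC IP
 * version the file is <prefix>_uni_mes.bin, <prefix>_mes_2.bin /
 * <prefix>_mes1.bin (GFX11, with a fallback to <prefix>_mes.bin for
 * the sched pipe) or <prefix>_mes.bin / <prefix>_mes1.bin.  The uc and
 * data start addresses plus the firmware version are parsed from the
 * image header.
 */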
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	u32 *ucode_ptr;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
				 "%s", fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "falling back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
	ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
			  sizeof(union amdgpu_firmware_header));
	adev->mes.fw_version[pipe] =
		le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
	bool is_supported = false;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
	    mes_rev >= 0x63)
		is_supported = true;

	return is_supported;
}

/* FIXME: node_id will be used to identify the correct MES instance in the future */
static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
					    uint32_t node_id, bool enable)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	op_input.op = MES_MISC_OP_CHANGE_CONFIG;
	op_input.change_config.option.limit_single_process = enable ? 1 : 0;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes change config is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to change_config\n");

error:
	return r;
}

int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
{
	int i, r = 0;

	if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
		mutex_lock(&adev->enforce_isolation_mutex);
		for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
			if (adev->enforce_isolation[i])
				r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
			else
				r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
		}
		mutex_unlock(&adev->enforce_isolation_mutex);
	}
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, adev->mes.event_log_size, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}