/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

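/*
 * Each process gets a doorbell slice of 1024 queues * 8 bytes per 64-bit
 * doorbell, i.e. 8 KiB, rounded up to a whole number of pages.
 */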
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

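/*
 * Allocate one kernel doorbell from the MES bitmap. The returned index is
 * in dwords relative to the doorbell BAR; each doorbell is 64-bit, hence
 * the "* 2" dword stride below. SDMA allocations start searching at the
 * first SDMA engine's doorbell offset, presumably so they land in the
 * doorbell range the SDMA firmware expects.
 */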
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

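/*
 * Allocate the GTT buffer the MES firmware logs events into; its contents
 * are exposed read-only through debugfs, see
 * amdgpu_debugfs_mes_event_log_init() at the end of this file.
 */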
static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, AMDGPU_MES_LOG_BUFFER_SIZE);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

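/*
 * amdgpu_mes_init - set up the driver-side MES state
 *
 * Initializes the pasid/gang/queue IDRs and locks, programs the HQD masks
 * that tell the scheduler which hardware queue slots it may use, allocates
 * the writeback slots used for the scheduler context, query-status fence
 * and register reads, and sets up the doorbell bitmap and the optional
 * event log buffer.
 */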
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

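/*
 * Create the MES representation of a process: a GTT-backed process context
 * the firmware reads, keyed by PASID in the pasid_idr. The context BO is
 * allocated before taking the MES lock so no other locks are acquired
 * underneath it.
 */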
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to insert pasid %d into idr\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

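/*
 * A gang is a group of queues belonging to one process that the firmware
 * schedules onto the hardware together; the gang quantum and priority
 * fields below control arbitration between gangs.
 */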
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

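/*
 * Allocate and clear the MQD (memory queue descriptor) for a queue. Note
 * the asymmetry: the BO is left reserved on success and is only unreserved
 * at the end of amdgpu_mes_queue_init_mqd().
 */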
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

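/*
 * amdgpu_mes_add_hw_queue - create a hardware queue through the MES
 *
 * Allocates a queue id and kernel doorbell, initializes the MQD, then
 * packs the process/gang/queue parameters into a mes_add_queue_input and
 * hands it to the IP-specific ->add_hw_queue() backend, which issues the
 * actual request to the firmware.
 */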
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

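/*
 * Unmap a "legacy" queue, i.e. a kernel queue the driver mapped directly
 * rather than one created through amdgpu_mes_add_hw_queue(). The trailing
 * fence address/data let the caller wait for the unmap to complete.
 */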
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

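/*
 * Register accesses routed through the MES misc ops, used when a register
 * cannot (or should not) be touched directly from the CPU, e.g. while the
 * scheduler firmware owns it. A read returns its result through the
 * read_val writeback slot set up in amdgpu_mes_init().
 */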
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush shader debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

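/*
 * Map an (engine, ring, offset id) triple to its byte offset inside
 * struct amdgpu_mes_ctx_meta_data. An id_offs below
 * AMDGPU_MES_CTX_MAX_OFFS selects one of the per-ring data slots (rptr,
 * wptr, fence and similar); the special RING/IB/PADDING values select the
 * ring buffer, IB and padding areas respectively.
 */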
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before amdgpu_mes_add_hw_queue() */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else {
			DRM_INFO("ring %s ib test pass\n", ring->name);
		}
	}

	return 0;
}

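/*
 * Self test: create a temporary VM and MES process, spawn one gang per
 * queue type with a single queue each, run ring and IB tests on every
 * queue, then tear everything down again.
 */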
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware does not support mapping SDMA queues yet. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

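/*
 * Fetch the MES firmware for the given pipe. From GC 11 on, the scheduler
 * pipe uses <prefix>_mes_2.bin and the KIQ pipe <prefix>_mes1.bin, with a
 * fallback to the older <prefix>_mes.bin name for the scheduler pipe;
 * earlier parts use <prefix>_mes.bin and <prefix>_mes1.bin.
 */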
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}