/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

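/**
 * amdgpu_mes_doorbell_process_slice - per-process doorbell space
 * @adev: amdgpu device pointer
 *
 * Returns the doorbell space reserved for each MES process: room for
 * AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS doorbells of
 * AMDGPU_ONE_DOORBELL_SIZE bytes each, rounded up to a full page.
 */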
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

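/**
 * amdgpu_mes_kernel_doorbell_get - allocate a kernel doorbell slot
 * @adev: amdgpu device pointer
 * @process: owning MES process (currently unused here)
 * @ip_type: ring type, used to pick the search offset (SDMA starts at the
 *           SDMA engine doorbell range)
 * @doorbell_index: returns the absolute dword doorbell index on the BAR
 *
 * Grabs the first free slot in the MES doorbell bitmap and converts it to
 * an absolute dword offset (each 64-bit doorbell covers two dwords).
 *
 * Returns 0 on success, -ENOSPC when no doorbell is available.
 */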
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  struct amdgpu_mes_process *process,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    struct amdgpu_mes_process *process,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

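/**
 * amdgpu_mes_init - initialize MES (Micro Engine Scheduler) software state
 * @adev: amdgpu device pointer
 *
 * Sets up the ID allocators (pasid, gang id, queue id), locks, HQD masks
 * for the compute/gfx/sdma pipes, the writeback slots used for the
 * scheduler context, the query-status fence and register reads, the
 * kernel doorbell bitmap and the MES event log buffer.
 *
 * Returns 0 on success, a negative error code on failure.
 */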
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

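/**
 * amdgpu_mes_create_process - create a MES process
 * @adev: amdgpu device pointer
 * @pasid: PASID the process is registered under
 * @vm: VM whose root page directory backs the process
 *
 * Allocates and zeroes the process context BO, then registers the process
 * in the pasid IDR under the MES lock. The process gets the default
 * process quantum and the GPU address of the VM root page directory.
 *
 * Returns 0 on success, a negative error code on failure.
 */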
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

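/**
 * amdgpu_mes_destroy_process - destroy a MES process and all its gangs
 * @adev: amdgpu device pointer
 * @pasid: PASID of the process to destroy
 *
 * Removes every hardware queue of every gang in the process from the MES
 * and drops the queue, gang and pasid IDR entries under the MES lock,
 * then frees the MQDs, gang contexts and process context outside of it.
 */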
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

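/**
 * amdgpu_mes_add_gang - add a gang to a MES process
 * @adev: amdgpu device pointer
 * @pasid: PASID of the owning process
 * @gprops: requested gang properties (priorities and quantum)
 * @gang_id: returns the IDR-allocated gang id
 *
 * Allocates and zeroes the gang context BO, looks up the process by PASID
 * and links the new gang into the process' gang list under the MES lock.
 * A zero gang_quantum in @gprops falls back to the default gang quantum.
 *
 * Returns 0 on success, a negative error code on failure.
 */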
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

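/**
 * amdgpu_mes_remove_gang - remove an idle gang
 * @adev: amdgpu device pointer
 * @gang_id: id of the gang to remove
 *
 * Unlinks the gang from its process and frees the gang context. Fails
 * with -EBUSY while the gang still owns queues.
 *
 * Returns 0 on success, a negative error code on failure.
 */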
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

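/**
 * amdgpu_mes_suspend - suspend all gangs of all MES processes
 * @adev: amdgpu device pointer
 *
 * Walks every process in the pasid IDR and asks the MES backend to
 * suspend each of its gangs. Failures are logged but don't abort the
 * walk. amdgpu_mes_resume() below is the symmetric counterpart.
 *
 * Returns 0.
 */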
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gang id %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gang id %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

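/**
 * amdgpu_mes_add_hw_queue - add a hardware queue to a gang
 * @adev: amdgpu device pointer
 * @gang_id: id of the gang the queue belongs to
 * @qprops: queue properties (type, ring, rptr/wptr addresses, priorities);
 *          the allocated doorbell is returned in @qprops->doorbell_off
 * @queue_id: returns the IDR-allocated queue id
 *
 * Allocates and initializes the queue MQD, assigns a queue id and a
 * kernel doorbell, fills a mes_add_queue_input descriptor from the gang
 * and process state and hands the queue to the MES backend.
 *
 * Returns 0 on success, a negative error code on failure.
 */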
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

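/**
 * amdgpu_mes_remove_hw_queue - remove a hardware queue from the MES
 * @adev: amdgpu device pointer
 * @queue_id: id of the queue to remove
 *
 * Drops the queue from the queue IDR, asks the MES backend to remove the
 * hardware queue, releases the kernel doorbell and frees the MQD.
 *
 * Returns 0 on success, -EINVAL if the queue id doesn't exist.
 */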
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

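/**
 * amdgpu_mes_unmap_legacy_queue - unmap a legacy (non-MES) queue via MES
 * @adev: amdgpu device pointer
 * @ring: ring backing the legacy queue
 * @action: unmap action (enum amdgpu_unmap_queues_action)
 * @gpu_addr: GPU address of the trailing fence
 * @seq: fence value written once the unmap has completed
 *
 * Returns 0 on success, a negative error code on failure.
 */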
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

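/**
 * amdgpu_mes_rreg - read a register through the MES
 * @adev: amdgpu device pointer
 * @reg: dword offset of the register
 *
 * Issues a MES_MISC_OP_READ_REG misc op; the MES writes the value into
 * the read_val writeback buffer, which is then returned. Returns 0 when
 * the misc_op callback is missing or the read fails.
 */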
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

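/**
 * amdgpu_mes_add_ring - create a kernel ring backed by a MES hardware queue
 * @adev: amdgpu device pointer
 * @gang_id: id of the gang the new queue joins
 * @queue_type: ring type (gfx, compute or sdma)
 * @idx: slot index inside the context meta data
 * @ctx_data: context meta data providing the ring/MQD storage
 * @out: returns the newly created ring
 *
 * Allocates an amdgpu_ring, borrows funcs/me/pipe from the first existing
 * ring of the requested type, initializes it as a MES queue backed by
 * @ctx_data and adds the corresponding hardware queue.
 *
 * Returns 0 on success, a negative error code on failure.
 */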
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before adding the hw queue */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

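/**
 * amdgpu_mes_ctx_map_meta_data - map the context meta data BO into a VM
 * @adev: amdgpu device pointer
 * @vm: VM to map the meta data into
 * @ctx_data: context meta data; meta_data_gpu_addr selects the target VA
 *
 * Locks the BO and the VM page directory with drm_exec, creates and maps
 * a bo_va at @ctx_data->meta_data_gpu_addr, updates the page tables and
 * waits for the updates to land before returning.
 *
 * Returns 0 on success, a negative error code on failure.
 */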
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

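/**
 * amdgpu_mes_self_test - sanity test for MES queue creation
 * @adev: amdgpu device pointer
 *
 * Creates a temporary VM and MES process, adds one gang per queue type
 * (gfx, compute, sdma) with one ring each, runs ring and IB tests on them
 * and tears everything down again.
 *
 * Returns 0; test failures are reported through the kernel log.
 */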
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/*
		 * On GC 10.3.x, the MES firmware doesn't support mapping
		 * SDMA queues yet, so skip them there.
		 */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

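/**
 * amdgpu_mes_init_microcode - load the MES firmware for one pipe
 * @adev: amdgpu device pointer
 * @pipe: MES pipe to load firmware for (AMDGPU_MES_SCHED_PIPE or the
 *        second, KIQ, pipe)
 *
 * Requests the pipe's firmware image, falling back to the legacy file
 * name for the scheduler pipe on GC 11+ if needed, records the ucode and
 * data start addresses from the header and, for PSP front-door loading,
 * registers the ucode entries with the firmware framework.
 *
 * Returns 0 on success, a negative error code on failure.
 */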
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, PAGE_SIZE, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_mes_event_log", 0444, root,
			    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}