/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

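/*
 * Each process gets a page-aligned slice of the doorbell aperture:
 * AMDGPU_ONE_DOORBELL_SIZE (8 bytes, one 64-bit doorbell) for each of
 * the AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS possible queues,
 * rounded up to a whole page. With 4 KiB pages, for example, that is
 * roundup(8 * 1024, 4096) = 8192 bytes, i.e. two pages per process.
 */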
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
				      unsigned int *doorbell_index)
{
	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
			       adev->mes.max_doorbell_slices,
			       GFP_KERNEL);
	if (r > 0)
		*doorbell_index = r;

	return r;
}

void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
				      unsigned int doorbell_index)
{
	if (doorbell_index)
		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

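/*
 * Translate a process slice index and a per-process doorbell id into
 * a dword offset within the doorbell BAR: the slice base converted to
 * dwords (slice bytes / sizeof(u32)) plus doorbell_id * 2, since each
 * 64-bit doorbell spans two dwords.
 */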
unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
					struct amdgpu_device *adev,
					uint32_t doorbell_index,
					unsigned int doorbell_id)
{
	return ((doorbell_index *
		amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
		doorbell_id * 2);
}

static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;

	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
		offset = adev->doorbell_index.sdma_engine[0];
		found = find_next_zero_bit(process->doorbell_bitmap,
					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
					   offset);
	} else {
		found = find_first_zero_bit(process->doorbell_bitmap,
					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
	}

	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, process->doorbell_bitmap);

	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
				process->doorbell_index, found);

	return 0;
}

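/*
 * Reverse of amdgpu_mes_queue_doorbell_get(): recover the per-process
 * doorbell id from the absolute dword offset by subtracting the slice
 * base in dwords and dividing by the two dwords per doorbell, then
 * clear its bit in the process bitmap.
 */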
static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, doorbell_id;

	doorbell_id = doorbell_index -
		(process->doorbell_index *
		 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
	doorbell_id /= 2;

	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
	WARN_ON(!old);
}

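/*
 * Carve the MES doorbell space out of what is left of the doorbell
 * aperture above the statically assigned indices: round the first
 * free byte offset up, and the aperture size down, to a whole process
 * slice, then divide to find how many process slices fit.
 */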
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	size_t doorbell_start_offset;
	size_t doorbell_aperture_size;
	size_t doorbell_process_limit;

	doorbell_start_offset = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
	doorbell_start_offset =
		roundup(doorbell_start_offset,
			amdgpu_mes_doorbell_process_slice(adev));

	doorbell_aperture_size = adev->doorbell.size;
	doorbell_aperture_size =
			rounddown(doorbell_aperture_size,
				  amdgpu_mes_doorbell_process_slice(adev));

	if (doorbell_aperture_size > doorbell_start_offset)
		doorbell_process_limit =
			(doorbell_aperture_size - doorbell_start_offset) /
			amdgpu_mes_doorbell_process_slice(adev);
	else
		return -ENOSPC;

	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
	adev->mes.max_doorbell_slices = doorbell_process_limit;

	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
	return 0;
}

int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
		adev->mes.agreegated_doorbells[i] = 0xffffffff;

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

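/*
 * Process creation allocates the doorbell bitmap and the GTT-resident
 * process context BO up front, and only then takes the MES lock to
 * publish the process in the pasid idr and reserve its doorbell
 * slice, keeping potentially sleeping allocations outside the lock.
 */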
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	process->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!process->doorbell_bitmap) {
		DRM_ERROR("failed to allocate doorbell bitmap\n");
		kfree(process);
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for pasid %d\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_ctx;
	}

	/* allocate the starting doorbell index of the process */
	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
	if (r < 0) {
		DRM_ERROR("failed to allocate doorbell for process\n");
		goto clean_up_pasid;
	}

	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_pasid:
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);
clean_up_ctx:
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process->doorbell_bitmap);
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process->doorbell_bitmap);
	kfree(process);
}

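/*
 * A gang is a group of queues that the MES scheduler handles as a
 * unit. As with process creation, the gang context BO is allocated
 * before the MES lock is taken.
 */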
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

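/*
 * Note that on success the MQD BO is intentionally left reserved; the
 * matching amdgpu_bo_unreserve() is done at the end of
 * amdgpu_mes_queue_init_mqd() once the MQD contents have been written.
 */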
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}

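/*
 * Adding a hardware queue: the MQD is allocated and reserved first,
 * then under the MES lock the gang is looked up, a queue id and
 * doorbell are assigned, the MQD is initialized, and the fully
 * described queue is handed to the MES firmware through the backend
 * add_hw_queue() callback.
 */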
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	amdgpu_mes_lock(&adev->mes);

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	amdgpu_mes_unlock(&adev->mes);
	return r;
}

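/*
 * Register access routed through MES: the misc_op backend asks the
 * MES firmware to perform the access on our behalf. For reads the
 * firmware deposits the value at read_val_gpu_addr, and the result is
 * picked up from the CPU-visible writeback slot read_val_ptr.
 */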
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	uint32_t val = 0;
	int r;

	amdgpu_mes_lock(&adev->mes);

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	amdgpu_mes_unlock(&adev->mes);
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	amdgpu_mes_lock(&adev->mes);

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	amdgpu_mes_lock(&adev->mes);

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	amdgpu_mes_lock(&adev->mes);

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

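/*
 * For a given engine this expands to offset lookups into struct
 * amdgpu_mes_ctx_meta_data: slot id_offs within the ring's slots
 * array for ordinary ids, or the ring/ib/padding member for the
 * special AMDGPU_MES_CTX_*_OFFS ids.
 */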
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);        \
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
				_eng[ring->idx].ring);                  \
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
				_eng[ring->idx].ib);                    \
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
				_eng[ring->idx].padding);               \
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before adding the hw queue */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r)
		return r;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

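/*
 * Mapping the context meta data BO into a VM follows the usual
 * kernel-driven mapping sequence: reserve the BO together with the
 * page directory, create a bo_va, map and validate it, update the
 * page directories, and wait on the accumulated fences so the mapping
 * is in place before any MES queue uses it.
 */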
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);

	csa_tv.bo = &ctx_data->meta_data_obj->tbo;
	csa_tv.num_shared = 1;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	ttm_eu_backoff_reservation(&ticket, &list);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error:
	amdgpu_vm_bo_del(adev, bo_va);
	ttm_eu_backoff_reservation(&ticket, &list);
	amdgpu_sync_free(&sync);
	return r;
}

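/*
 * Self-test helpers: create one gang per queue type and populate it
 * with rings, then run ring and IB tests on everything that was
 * successfully added.
 */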
static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s test pass\n", ring->name);

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

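/*
 * The MES self test stands up a throwaway VM and process, creates a
 * gang for each queue type with its maximum number of context rings,
 * exercises them with ring and IB tests, and tears everything down
 * again.
 */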
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
				   AMDGPU_MES_CTX_MAX_GFX_RINGS},
				 { AMDGPU_RING_TYPE_COMPUTE,
				   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS},
				 { AMDGPU_RING_TYPE_SDMA,
				   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_pasid;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware doesn't support mapping SDMA queues yet. */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	BUG_ON(amdgpu_bo_reserve(ctx_data.meta_data_obj, true));
	amdgpu_vm_bo_del(adev, ctx_data.meta_data_va);
	amdgpu_bo_unreserve(ctx_data.meta_data_obj);
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}