/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

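/*
 * Reserve one kernel doorbell slot from the MES doorbell bitmap.
 * SDMA queues start scanning at the first SDMA engine's doorbell index;
 * all other queue types scan from slot 0. On success, the absolute dword
 * offset of the doorbell on the BAR is returned through @doorbell_index;
 * each 64-bit doorbell covers two dwords, hence the "* 2" below.
 */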
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

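/**
 * amdgpu_mes_init - initialize MES bookkeeping
 * @adev: amdgpu device pointer
 *
 * Sets up the pasid/gang/queue IDRs and their locks, programs the VMID and
 * HQD masks handed to the MES firmware, allocates writeback slots for the
 * scheduler context, the query-status fence and register reads, and
 * initializes the kernel doorbell bitmap.
 *
 * Returns 0 on success or a negative error code on failure.
 */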
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

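/**
 * amdgpu_mes_fini - tear down what amdgpu_mes_init() set up
 * @adev: amdgpu device pointer
 *
 * Releases the writeback slots and the doorbell bitmap, and destroys the
 * IDRs, the doorbell IDA and the hidden MES mutex.
 */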
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

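/**
 * amdgpu_mes_create_process - create a MES process for a pasid
 * @adev: amdgpu device pointer
 * @pasid: pasid the new process is bound to
 * @vm: VM the process runs in
 *
 * Allocates the process descriptor and its GTT-backed process context
 * buffer, then registers the process in the pasid IDR under the MES lock.
 *
 * Returns 0 on success or a negative error code on failure.
 */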
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

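/**
 * amdgpu_mes_destroy_process - destroy the MES process bound to a pasid
 * @adev: amdgpu device pointer
 * @pasid: pasid of the process to destroy
 *
 * Removes every hardware queue of every gang from the MES scheduler and
 * drops all IDR entries under the MES lock, then frees the queue, gang and
 * process memory outside of it.
 */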
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

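/**
 * amdgpu_mes_add_gang - add a gang to a MES process
 * @adev: amdgpu device pointer
 * @pasid: pasid of the owning process
 * @gprops: requested gang properties (priority, quantum, ...)
 * @gang_id: returned id allocated from the gang IDR
 *
 * Allocates the gang descriptor and its GTT-backed gang context buffer,
 * then links the gang into the process under the MES lock.
 *
 * Returns 0 on success or a negative error code on failure.
 */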
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

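/**
 * amdgpu_mes_remove_gang - remove an empty gang
 * @adev: amdgpu device pointer
 * @gang_id: id of the gang to remove
 *
 * The gang must not have any queues left, otherwise -EBUSY is returned.
 */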
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

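/**
 * amdgpu_mes_suspend - ask the MES firmware to suspend all gangs
 * @adev: amdgpu device pointer
 *
 * Walks every gang of every process and issues a suspend_gang request for
 * each. Failures are logged but do not abort the walk; amdgpu_mes_resume()
 * below is the symmetric operation.
 */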
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					 pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

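/**
 * amdgpu_mes_add_hw_queue - create a hardware queue on a gang
 * @adev: amdgpu device pointer
 * @gang_id: gang the queue belongs to
 * @qprops: requested queue properties
 * @queue_id: returned id allocated from the queue IDR
 *
 * Allocates and initializes the MQD, reserves a queue id and a kernel
 * doorbell, and hands the queue to the MES firmware for scheduling.
 * amdgpu_mes_add_ring() below shows the typical calling sequence.
 *
 * Returns 0 on success or a negative error code on failure.
 */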
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the queue to the mes queue idr */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

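/**
 * amdgpu_mes_remove_hw_queue - remove a hardware queue from MES
 * @adev: amdgpu device pointer
 * @queue_id: id of the queue to remove
 *
 * Unmaps the queue from the MES scheduler, releases its doorbell and queue
 * id, and frees the MQD and the queue descriptor.
 */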
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the queue from the mes queue idr */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

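/**
 * amdgpu_mes_unmap_legacy_queue - unmap a kernel ring through MES
 * @adev: amdgpu device pointer
 * @ring: kernel ring backing the legacy queue
 * @action: unmap action for the firmware
 * @gpu_addr: GPU address the trailing fence is written to
 * @seq: trailing fence value to write
 */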
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

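/**
 * amdgpu_mes_rreg - read a register through the MES firmware
 * @adev: amdgpu device pointer
 * @reg: dword offset of the register
 *
 * Asks the MES firmware to read the register into the preallocated
 * read_val writeback slot. Returns the register value, or 0 if the
 * operation is unsupported or fails. amdgpu_mes_wreg() below is the
 * write counterpart.
 */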
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

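/**
 * amdgpu_mes_set_shader_debugger - program per-process shader debug state
 * @adev: amdgpu device pointer
 * @process_context_addr: GPU address of the process context to modify
 * @spi_gdbg_per_vmid_cntl: per-VMID SPI debug control value
 * @tcp_watch_cntl: TCP watchpoint control values to copy in
 * @flags: shader debugger flags
 * @trap_en: enable traps (only forwarded on MES API version >= 14)
 */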
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;
	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

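/**
 * amdgpu_mes_add_ring - create a kernel ring backed by a MES hardware queue
 * @adev: amdgpu device pointer
 * @gang_id: gang the new queue is added to
 * @queue_type: AMDGPU_RING_TYPE_GFX, _COMPUTE or _SDMA
 * @idx: ring slot index inside the MES context metadata
 * @ctx_data: mapped MES context metadata providing the ring storage
 * @out: returned ring
 *
 * Borrows funcs/me/pipe from the first existing ring of @queue_type,
 * initializes the ring on top of the context metadata and maps it as a
 * MES hardware queue.
 *
 * Returns 0 on success or a negative error code on failure.
 */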
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before amdgpu_mes_add_hw_queue(),
	 * so do not unlock again on this path
	 */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

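/**
 * amdgpu_mes_self_test - exercise the whole MES queue path
 * @adev: amdgpu device pointer
 *
 * Creates a throwaway VM and process, one gang per queue type, adds a ring
 * to each gang and runs ring and IB tests on them, then tears everything
 * down again. Intended as a bring-up sanity check; always returns 0.
 */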
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware does not support mapping SDMA queues. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

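/**
 * amdgpu_mes_init_microcode - request the MES firmware for one pipe
 * @adev: amdgpu device pointer
 * @pipe: AMDGPU_MES_SCHED_PIPE or AMDGPU_MES_KIQ_PIPE
 *
 * Builds the firmware file name from the GC IP version (with a fallback to
 * the unsuffixed name for the scheduler pipe on GFX11+), parses the header
 * for the ucode and data start addresses, and registers the images for PSP
 * loading.
 *
 * Returns 0 on success or a negative error code on failure.
 */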
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}
1482