/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

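/**
 * amdgpu_mes_doorbell_process_slice - size of one process's doorbell slice
 * @adev: amdgpu device structure
 *
 * Each process may own up to AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS
 * queues, each backed by one 8-byte (64-bit) doorbell; the slice is
 * rounded up to a whole page.
 */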
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

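/*
 * amdgpu_mes_kernel_doorbell_get - allocate a kernel doorbell for a queue
 *
 * Scans the MES doorbell bitmap for a free slot (starting at the SDMA
 * region for SDMA queues) and returns the absolute dword offset of the
 * doorbell in the BAR; each 64-bit doorbell spans two dwords, hence the
 * "* 2". Returns -ENOSPC when the bitmap is exhausted.
 */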
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

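/*
 * amdgpu_mes_doorbell_init - set up the kernel doorbell bitmap
 *
 * One page of doorbell space is managed here: PAGE_SIZE / 8 doorbells
 * in total, with the first AMDGPU_MES_PRIORITY_NUM_LEVELS slots
 * pre-reserved as the per-priority aggregated doorbells.
 */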
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

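/**
 * amdgpu_mes_init - software init of the MES (Micro Engine Scheduler)
 * @adev: amdgpu device structure
 *
 * Sets up the IDRs and locks for processes, gangs and queues, programs
 * the default VMID/HQD masks, allocates write-back slots for the
 * scheduler context, the query-status fence and register reads, and
 * initializes the kernel doorbell bitmap. Returns 0 on success.
 */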
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

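/**
 * amdgpu_mes_create_process - register a process with the MES
 * @adev: amdgpu device structure
 * @pasid: PASID of the process
 * @vm: VM used by the process
 *
 * Allocates and zeroes the per-process context BO in GTT, then inserts
 * the process into the pasid IDR under the MES lock. Returns 0 on
 * success or a negative error code.
 */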
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

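/**
 * amdgpu_mes_destroy_process - tear down a MES process
 * @adev: amdgpu device structure
 * @pasid: PASID of the process to destroy
 *
 * Removes every queue of every gang from the hardware and from the
 * IDRs while holding the MES lock, then frees the MQDs, gang contexts
 * and the process context after dropping the lock.
 */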
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

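/**
 * amdgpu_mes_add_gang - create a gang within a MES process
 * @adev: amdgpu device structure
 * @pasid: PASID of the owning process
 * @gprops: requested gang properties (priority, quantum, ...)
 * @gang_id: returned gang id allocated from the gang IDR
 *
 * A gang is a group of queues the MES schedules together. Allocates
 * the gang context BO and links the gang into the process's gang list.
 * Returns 0 on success or a negative error code.
 */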
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

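/**
 * amdgpu_mes_remove_gang - remove an empty gang
 * @adev: amdgpu device structure
 * @gang_id: id of the gang to remove
 *
 * Fails with -EBUSY if the gang still has queues; otherwise unlinks
 * the gang, frees its context BO and releases its id.
 */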
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

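/*
 * amdgpu_mes_suspend / amdgpu_mes_resume - suspend or resume all gangs
 *
 * Walks every gang of every registered process under the MES lock and
 * asks the MES firmware to suspend or resume it via the per-ASIC
 * callbacks. Errors are logged but do not abort the walk.
 */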
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input = {0};
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input = {0};
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				     struct amdgpu_mes_queue *q,
				     struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}

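/**
 * amdgpu_mes_add_hw_queue - add a hardware queue to the MES scheduler
 * @adev: amdgpu device structure
 * @gang_id: gang the new queue belongs to
 * @qprops: queue properties (type, ring buffer address, priority, ...)
 * @queue_id: returned queue id
 *
 * Allocates and initializes the MQD, reserves a doorbell, then fills a
 * mes_add_queue_input and hands the queue to the MES firmware. On
 * failure each step is unwound in reverse order.
 */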
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

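/**
 * amdgpu_mes_remove_hw_queue - remove a hardware queue from the MES
 * @adev: amdgpu device structure
 * @queue_id: id of the queue to remove
 *
 * Drops the queue from the queue IDR, asks the MES firmware to remove
 * it from hardware, then frees its doorbell, MQD and memory.
 */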
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

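/**
 * amdgpu_mes_unmap_legacy_queue - unmap a kernel ring's queue via MES
 * @adev: amdgpu device structure
 * @ring: the kernel ring backing the queue
 * @action: how the queue should be unmapped (see enum
 *          amdgpu_unmap_queues_action)
 * @gpu_addr: address of the trailing fence written on completion
 * @seq: value of the trailing fence
 */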
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

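/**
 * amdgpu_mes_rreg - read a register through the MES firmware
 * @adev: amdgpu device structure
 * @reg: dword offset of the register
 *
 * Issues a MES_MISC_OP_READ_REG misc op; the firmware writes the value
 * into the pre-allocated read_val write-back slot, which is then
 * returned. Returns 0 if the operation is unsupported or fails.
 */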
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	uint32_t val = 0;
	int r;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

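/**
 * amdgpu_mes_set_shader_debugger - configure shader debugging for a process
 * @adev: amdgpu device structure
 * @process_context_addr: GPU address of the target process context
 * @spi_gdbg_per_vmid_cntl: per-VMID SPI debug control value
 * @tcp_watch_cntl: TCP watchpoint control values
 * @flags: debugger flags
 * @trap_en: enable traps (only honored for MES API version >= 14)
 */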
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;
	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

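/**
 * amdgpu_mes_add_ring - create a kernel ring backed by a MES queue
 * @adev: amdgpu device structure
 * @gang_id: gang the ring's queue is added to
 * @queue_type: AMDGPU_RING_TYPE_GFX/COMPUTE/SDMA
 * @idx: index of the ring within the ctx meta data
 * @ctx_data: mapped MES context meta data backing the ring
 * @out: returned ring
 *
 * The new ring borrows the funcs of the corresponding legacy ring and
 * gets its storage from the context meta data BO; the actual hardware
 * queue is created with amdgpu_mes_add_hw_queue().
 */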
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before adding the hw queue */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	/* reached with the MES lock still held */
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

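/**
 * amdgpu_mes_ctx_map_meta_data - map the ctx meta data BO into a VM
 * @adev: amdgpu device structure
 * @vm: VM to map the BO into
 * @ctx_data: context meta data whose BO gets mapped
 *
 * Locks the BO and the VM page directory with drm_exec, creates the
 * mapping at ctx_data->meta_data_gpu_addr, updates the page tables and
 * waits for the updates to land before returning.
 */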
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

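/**
 * amdgpu_mes_self_test - sanity test of the MES scheduler
 * @adev: amdgpu device structure
 *
 * Builds a throw-away process with a temporary VM and PASID, creates
 * one gang per queue type with a single queue each, runs ring and IB
 * tests on them, and tears everything down again. Always returns 0.
 */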
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware doesn't yet support mapping SDMA queues. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

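/**
 * amdgpu_mes_init_microcode - request MES firmware for one pipe
 * @adev: amdgpu device structure
 * @pipe: AMDGPU_MES_SCHED_PIPE or AMDGPU_MES_KIQ_PIPE
 *
 * Picks the firmware file name from the GC IP version (with a fallback
 * name for the scheduler pipe on GC 11+), parses the ucode/data start
 * addresses from the header and, for PSP front-door loading, registers
 * the ucode and data blobs with the firmware framework.
 */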
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}
1466