/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

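/**
 * amdgpu_mes_doorbell_process_slice - per-process doorbell aperture size
 * @adev: amdgpu device pointer
 *
 * Each queue uses one 8-byte doorbell, so a process slice covers the
 * maximum number of queues per process, rounded up to a whole page.
 */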
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

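/*
 * Set up the doorbell bitmap used for dynamic allocation and reserve the
 * first doorbells as aggregated doorbells, one per priority level.
 */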
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

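/*
 * Allocate and clear the VRAM buffer the MES firmware uses for its event
 * log, if MES event logging is enabled.
 */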
static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)\n", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

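/**
 * amdgpu_mes_init - initialize the MES (Micro Engine Scheduler) state
 * @adev: amdgpu device pointer
 *
 * Sets up the ID allocators and locks, computes the VMID and HQD masks
 * handed to the MES firmware, allocates writeback slots for the scheduler
 * context and query-status fence of each MES pipe, and initializes the
 * doorbells and the optional event log.
 */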
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r, num_pipes;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;

	num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
	if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
		dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
			 num_pipes, AMDGPU_MES_MAX_GFX_PIPES);
	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
		if (i >= num_pipes)
			break;
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(12, 0, 0))
			/*
			 * GFX v12 has only one GFX pipe, but eight queues in it.
			 * GFX pipe 0 queue 0 is used by the kernel queue.
			 * Set GFX pipe 0 queues 1-7 for MES scheduling:
			 * mask = 1111 1110b
			 */
			adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
		else
			/*
			 * GFX pipe 0 queue 0 is used by the kernel queue.
			 * Set GFX pipe 0 queue 1 for MES scheduling:
			 * mask = 10b
			 */
			adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
	}

	num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
	if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
		dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
			 num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		if (i >= num_pipes)
			break;
		adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
	}

	num_pipes = adev->sdma.num_instances;
	if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
		dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
			 num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (i >= num_pipes)
			break;
		adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
				 &adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
			      "(%d) query_status_fence_offs wb alloc failed\n",
			      r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

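/**
 * amdgpu_mes_fini - tear down the MES state
 * @adev: amdgpu device pointer
 *
 * Releases everything amdgpu_mes_init() allocated: the event log buffer,
 * the per-pipe writeback slots, the doorbell bitmap, and the ID allocators.
 */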
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i;

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

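/**
 * amdgpu_mes_suspend - ask the MES firmware to suspend all gangs
 * @adev: amdgpu device pointer
 *
 * No-op on hardware whose MES firmware does not support suspending and
 * resuming all gangs in a single call.
 */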
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under the MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to suspend all gangs\n");

	return r;
}

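/**
 * amdgpu_mes_resume - ask the MES firmware to resume all gangs
 * @adev: amdgpu device pointer
 *
 * Counterpart of amdgpu_mes_suspend(); likewise a no-op when the firmware
 * does not support the operation.
 */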
int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under the MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to resume all gangs\n");

	return r;
}

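/**
 * amdgpu_mes_map_legacy_queue - map a kernel ("legacy") ring through MES
 * @adev: amdgpu device pointer
 * @ring: the ring to map
 *
 * Hands the ring's MQD and write pointer addresses to the MES firmware so
 * it can program the hardware queue.
 */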
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

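/**
 * amdgpu_mes_unmap_legacy_queue - unmap a kernel ring through MES
 * @adev: amdgpu device pointer
 * @ring: the ring to unmap
 * @action: preemption behavior for the unmap
 * @gpu_addr: trailing fence address written when the unmap completes
 * @seq: trailing fence value
 */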
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

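/**
 * amdgpu_mes_reset_legacy_queue - reset a hung kernel ring through MES
 * @adev: amdgpu device pointer
 * @ring: the ring to reset
 * @vmid: VMID associated with the queue
 * @use_mmio: reset via MMIO rather than a MES packet
 */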
int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid,
				  bool use_mmio)
{
	struct mes_reset_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.me_id = ring->me;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;
	queue_input.use_mmio = use_mmio;
	queue_input.is_kq = true;
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
		queue_input.legacy_gfx = true;

	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset legacy queue\n");

	return r;
}

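/**
 * amdgpu_mes_rreg - read a register through the MES firmware
 * @adev: amdgpu device pointer
 * @reg: register offset to read
 *
 * The firmware writes the register value into a temporary writeback slot,
 * which is read back and freed here.  Returns 0 on any failure.
 */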
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;
	uint32_t addr_offset = 0;
	uint64_t read_val_gpu_addr;
	uint32_t *read_val_ptr;

	if (amdgpu_device_wb_get(adev, &addr_offset)) {
		dev_err(adev->dev, "critical bug! too many mes readers\n");
		goto error;
	}
	read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
	read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
	else
		val = *(read_val_ptr);

error:
	if (addr_offset)
		amdgpu_device_wb_free(adev, addr_offset);
	return val;
}

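/**
 * amdgpu_mes_wreg - write a register through the MES firmware
 * @adev: amdgpu device pointer
 * @reg: register offset to write
 * @val: value to write
 */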
int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);

error:
	return r;
}

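/**
 * amdgpu_mes_reg_write_reg_wait - write one register, then poll another
 * @adev: amdgpu device pointer
 * @reg0: register to write
 * @reg1: register to poll
 * @ref: reference value to wait for
 * @mask: mask applied to @reg1 before the comparison
 */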
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to reg_write_reg_wait\n");

error:
	return r;
}

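/**
 * amdgpu_mes_reg_wait - poll a register until it matches a value
 * @adev: amdgpu device pointer
 * @reg: register to poll
 * @val: reference value to wait for
 * @mask: mask applied to @reg before the comparison
 */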
int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to reg wait\n");

error:
	return r;
}

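/**
 * amdgpu_mes_set_shader_debugger - configure the shader debugger via MES
 * @adev: amdgpu device pointer
 * @process_context_addr: GPU address of the process context to debug
 * @spi_gdbg_per_vmid_cntl: per-VMID SPI debug control value
 * @tcp_watch_cntl: TCP watchpoint control values
 * @flags: shader debugger flags
 * @trap_en: enable the trap handler (honored on MES API version >= 14)
 */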
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

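/**
 * amdgpu_mes_flush_shader_debugger - flush a process context for the debugger
 * @adev: amdgpu device pointer
 * @process_context_addr: GPU address of the process context to flush
 */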
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

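/*
 * Translate a context slot index into a byte offset within struct
 * amdgpu_mes_ctx_meta_data for the engine that owns @ring; returns
 * -EINVAL (with a warning) for unhandled ring types or indices.
 */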
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

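/**
 * amdgpu_mes_init_microcode - request and parse the MES firmware
 * @adev: amdgpu device pointer
 * @pipe: MES pipe the firmware is for
 *
 * Picks the firmware file name based on the GC IP version (with a fallback
 * for the scheduler pipe on GC 11.x), reads the ucode and data start
 * addresses and the firmware version from the header, and registers the
 * images for PSP front-door loading when that load type is in use.
 */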
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	u32 *ucode_ptr;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
				 "%s", fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
	ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
			  sizeof(union amdgpu_firmware_header));
	adev->mes.fw_version[pipe] =
		le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

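/**
 * amdgpu_mes_suspend_resume_all_supported - firmware capability check
 * @adev: amdgpu device pointer
 *
 * Suspending/resuming all gangs in one call is only supported on GC 11.x
 * with MES scheduler firmware revision 0x63 or newer.
 */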
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
	bool is_supported = false;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
	    mes_rev >= 0x63)
		is_supported = true;

	return is_supported;
}

/* FIXME: node_id will be used to identify the correct MES instance in the future */
static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
					    uint32_t node_id, bool enable)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	op_input.op = MES_MISC_OP_CHANGE_CONFIG;
	op_input.change_config.option.limit_single_process = enable ? 1 : 0;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes change config is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to change_config.\n");

error:
	return r;
}

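/**
 * amdgpu_mes_update_enforce_isolation - sync isolation settings to MES
 * @adev: amdgpu device pointer
 *
 * Pushes the per-partition enforce-isolation state to the MES firmware as
 * its limit-single-process option; errors from the individual calls are
 * OR-ed together.
 */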
int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
{
	int i, r = 0;

	if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
		mutex_lock(&adev->enforce_isolation_mutex);
		for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
			if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
				r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
			else
				r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
		}
		mutex_unlock(&adev->enforce_isolation_mutex);
	}
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, adev->mes.event_log_size, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}