/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8
#define AMDGPU_MES_RESERVED_QUEUES	2

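/*
 * Size of the doorbell slice reserved for each process: one 8-byte doorbell
 * per queue, for the maximum number of queues a process may own, rounded up
 * to a whole page.
 */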
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

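/*
 * Set up the bitmap used to hand out kernel doorbells to MES and reserve
 * the first slots for the per-priority-level aggregated doorbells.
 */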
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

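/*
 * Allocate and zero the VRAM buffer that the MES firmware writes its event
 * log into; skipped when MES logging is disabled.
 */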
static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)\n", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

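/*
 * One-time MES software init: ID allocators and locks, the HQD masks that
 * tell MES which GFX/compute/SDMA queues it may schedule, per-pipe
 * writeback slots for the scheduler context and query-status fences, the
 * doorbell bitmap, the optional event log and the hung-queue doorbell
 * arrays.
 */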
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r, num_pipes;
	int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	u32 total_vmid_mask, reserved_vmid_mask;
	u32 queue_mask, reserved_queue_mask;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	total_vmid_mask = (u32)((1UL << 16) - 1);
	reserved_vmid_mask = (u32)((1UL << adev->vm_manager.first_kfd_vmid) - 1);

	adev->mes.vmid_mask_mmhub = 0xFF00;
	adev->mes.vmid_mask_gfxhub = total_vmid_mask & ~reserved_vmid_mask;

	queue_mask = (u32)(1UL << adev->gfx.mec.num_queue_per_pipe) - 1;
	reserved_queue_mask = (u32)(1UL << AMDGPU_MES_RESERVED_QUEUES) - 1;

	num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
	if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
		dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
			 num_pipes, AMDGPU_MES_MAX_GFX_PIPES);

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
		if (i >= num_pipes)
			break;
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(12, 0, 0))
			/*
			 * GFX v12 has only one GFX pipe, but 8 queues in it.
			 * GFX pipe 0 queue 0 is used by the kernel queue, so
			 * give queues 1-7 to MES (mask = 1111 1110b). With
			 * kernel queues disabled, MES gets all 8 queues
			 * (mask = 1111 1111b).
			 */
			adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
		else
			/*
			 * GFX pipe 0 queue 0 is used by the kernel queue, so
			 * give queue 1 to MES (mask = 10b). With kernel queues
			 * disabled, MES gets queues 0 and 1 (mask = 11b).
			 */
			adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
	}

	num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
	if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
		dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
			 num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		if (i >= num_pipes)
			break;
		adev->mes.compute_hqd_mask[i] =
			adev->gfx.disable_kq ? 0xF : (queue_mask & ~reserved_queue_mask);
	}

	num_pipes = adev->sdma.num_instances;
	if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
		dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
			 num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (i >= num_pipes)
			break;
		adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
				 &adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
			      "(%d) query_status_fence_offs wb alloc failed\n",
			      r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	if (adev->mes.hung_queue_db_array_size) {
		for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++) {
			r = amdgpu_bo_create_kernel(adev,
						    adev->mes.hung_queue_db_array_size * sizeof(u32),
						    PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT,
						    &adev->mes.hung_queue_db_array_gpu_obj[i],
						    &adev->mes.hung_queue_db_array_gpu_addr[i],
						    &adev->mes.hung_queue_db_array_cpu_addr[i]);
			if (r) {
				dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)\n", r);
				goto error_doorbell;
			}
		}
	}

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
		if (adev->mes.hung_queue_db_array_gpu_obj[i])
			amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj[i],
					      &adev->mes.hung_queue_db_array_gpu_addr[i],
					      &adev->mes.hung_queue_db_array_cpu_addr[i]);
	}

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

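/* Tear down everything amdgpu_mes_init() set up, in roughly reverse order. */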
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i, num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++) {
		amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj[i],
				      &adev->mes.hung_queue_db_array_gpu_addr[i],
				      &adev->mes.hung_queue_db_array_cpu_addr[i]);

		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

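/*
 * Ask the MES firmware to suspend all gangs in a single call. Only possible
 * on firmware that supports suspend/resume-all; otherwise this is a no-op.
 */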
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to suspend all gangs\n");

	return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to resume all gangs\n");

	return r;
}

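/*
 * Have MES map a kernel ("legacy") ring's hardware queue from the ring's
 * MQD, write pointer and doorbell.
 */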
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring, uint32_t xcc_id)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.xcc_id = xcc_id;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to map legacy queue\n");

	return r;
}

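/*
 * Have MES unmap a kernel ("legacy") ring's hardware queue. The trailing
 * fence address/value pair lets the caller poll for completion of the
 * preemption.
 */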
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq, uint32_t xcc_id)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.xcc_id = xcc_id;
	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to unmap legacy queue\n");

	return r;
}

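/*
 * Have MES (or direct MMIO, when use_mmio is set) reset a hung kernel queue
 * identified by its me/pipe/queue and doorbell offset.
 */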
int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid,
				  bool use_mmio,
				  uint32_t xcc_id)
{
	struct mes_reset_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.xcc_id = xcc_id;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.me_id = ring->me;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;
	queue_input.use_mmio = use_mmio;
	queue_input.is_kq = true;
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
		queue_input.legacy_gfx = true;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to reset legacy queue\n");

	return r;
}

int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev)
{
	return adev->mes.hung_queue_db_array_size;
}

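/*
 * Ask MES to scan queues of the given type for hangs and, unless
 * detect_only is set, reset the offenders. The doorbell offsets of the hung
 * queues found are returned through hung_db_array/hung_db_num.
 */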
int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
					    int queue_type,
					    bool detect_only,
					    unsigned int *hung_db_num,
					    u32 *hung_db_array,
					    uint32_t xcc_id)
{
	struct mes_detect_and_reset_queue_input input;
	u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr[xcc_id];
	int r, i;

	if (!hung_db_num || !hung_db_array)
		return -EINVAL;

	if ((queue_type != AMDGPU_RING_TYPE_GFX) &&
	    (queue_type != AMDGPU_RING_TYPE_COMPUTE) &&
	    (queue_type != AMDGPU_RING_TYPE_SDMA))
		return -EINVAL;

	/* Clear the doorbell array before detection */
	memset(adev->mes.hung_queue_db_array_cpu_addr[xcc_id], AMDGPU_MES_INVALID_DB_OFFSET,
		adev->mes.hung_queue_db_array_size * sizeof(u32));
	input.queue_type = queue_type;
	input.detect_only = detect_only;

	r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes,
							  &input);
	if (r) {
		dev_err(adev->dev, "failed to detect and reset\n");
	} else {
		*hung_db_num = 0;
		for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) {
			if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
				hung_db_array[i] = db_array[i];
				*hung_db_num += 1;
			}
		}

		/*
		 * TODO: return HQD info for MES scheduled user compute queue reset cases
		 * stored in hung_db_array hqd info offset to full array size
		 */
	}

	return r;
}

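/*
 * Register access routed through the MES firmware, for configurations where
 * the driver cannot (or must not) touch the register directly over MMIO.
 * The read value is returned through a temporary writeback slot that MES
 * fills in.
 */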
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg,
			 uint32_t xcc_id)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;
	uint32_t addr_offset = 0;
	uint64_t read_val_gpu_addr;
	uint32_t *read_val_ptr;

	if (amdgpu_device_wb_get(adev, &addr_offset)) {
		dev_err(adev->dev, "critical bug! too many mes readers\n");
		goto error;
	}
	read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
	read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
	op_input.xcc_id = xcc_id;
	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes rreg is not supported!\n");
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
	else
		val = *(read_val_ptr);

error:
	if (addr_offset)
		amdgpu_device_wb_free(adev, addr_offset);
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev, uint32_t reg,
		    uint32_t val, uint32_t xcc_id)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.xcc_id = xcc_id;
	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);

error:
	return r;
}

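/*
 * Write ref to reg0, then wait until the value of reg1 masked with mask
 * equals ref; both steps are executed by the MES firmware as one misc op.
 */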
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask,
				  uint32_t xcc_id)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.xcc_id = xcc_id;
	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
{
	uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;

	hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
	hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
	ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;

	return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
					     ref_and_mask, ref_and_mask, 0);
}

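/*
 * Program the per-process shader debugger state (SPI debug control, TCP
 * watch points, trap enable) through MES. The process-context flush variant
 * lives in amdgpu_mes_flush_shader_debugger() below.
 */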
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en,
				uint32_t xcc_id)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev,
			"mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.xcc_id = xcc_id;
	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger() instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr,
				     uint32_t xcc_id)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev,
			"mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.xcc_id = xcc_id;
	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		dev_err(adev->dev, "failed to flush_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

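/*
 * Request the MES firmware image for the given pipe. Unified MES uses a
 * single *_uni_mes.bin; GFX11 uses *_mes_2.bin (scheduler) and *_mes1.bin
 * (KIQ) with a fallback to the older *_mes.bin name; everything else uses
 * *_mes.bin / *_mes1.bin. The ucode start addresses and firmware version
 * are recorded, and the images are registered for PSP front-door loading
 * when that load type is in use.
 */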
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	u32 *ucode_ptr;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
				 "%s", fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
	ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
			  sizeof(union amdgpu_firmware_header));
	adev->mes.fw_version[pipe] =
		le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;

	return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
		 amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
		 mes_rev >= 0x63) ||
		amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0));
}

/* FIXME: node_id will be used to identify the correct MES instance in the future */
static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
					    uint32_t node_id, bool enable)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	op_input.op = MES_MISC_OP_CHANGE_CONFIG;
	op_input.change_config.option.limit_single_process = enable ? 1 : 0;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes change config is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to change_config\n");

error:
	return r;
}

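/*
 * Push the current per-partition enforce-isolation setting down to MES as
 * its "limit single process" configuration option.
 */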
int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
{
	int i, r = 0;

	if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
		mutex_lock(&adev->enforce_isolation_mutex);
		for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
			if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
				r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
			else
				r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
		}
		mutex_unlock(&adev->enforce_isolation_mutex);
	}
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, adev->mes.event_log_size, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}
815