/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

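/*
 * Editorial summary (not from the original file): each process may own up
 * to AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS queues, and each queue uses
 * one 8-byte (64-bit) doorbell, so the per-process doorbell slice is
 * 8 * 1024 = 8 KiB, rounded up to the page size.
 */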
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

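/*
 * Editorial summary (not from the original file): one page of doorbell
 * space holds PAGE_SIZE / 8 64-bit doorbells, tracked in
 * mes->doorbell_bitmap.  The first AMDGPU_MES_PRIORITY_NUM_LEVELS slots
 * are claimed up front as aggregated doorbells, one per priority level;
 * aggregated_doorbells[] stores their dword offsets (two dwords per
 * 64-bit doorbell, hence the i * 2).
 */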
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

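/*
 * Editorial summary (not from the original file): the MES firmware writes
 * its event log into a VRAM buffer that is also CPU-mapped, so
 * amdgpu_debugfs_mes_event_log_show() below can dump it through debugfs.
 * Creation is skipped entirely when amdgpu_mes_log_enable is not set.
 */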
static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)\n", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

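/**
 * amdgpu_mes_init - initialize MES (MicroEngine Scheduler) state
 * @adev: amdgpu device pointer
 *
 * (Editorial kernel-doc, not from the original file.)  Sets up the ID
 * allocators and locks, computes which VMIDs and hardware queue slots
 * (GFX/compute/SDMA HQDs) MES may schedule on, allocates one writeback
 * slot per MES pipe for the scheduler context and the query-status fence,
 * and finally initializes the doorbell page and the optional event log.
 *
 * Returns 0 on success, negative error code on failure.
 */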
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r, num_pipes;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;

	num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
	if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
		dev_warn(adev->dev, "more GFX pipes than supported by MES! (%d vs %d)\n",
			 num_pipes, AMDGPU_MES_MAX_GFX_PIPES);

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
		if (i >= num_pipes)
			break;
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(12, 0, 0))
			/*
			 * GFX v12 has only one GFX pipe, but 8 queues in it.
			 * GFX pipe 0 queue 0 is used by the kernel queue.
			 * Set GFX pipe 0 queues 1-7 for MES scheduling
			 * mask = 1111 1110b
			 */
			adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
		else
			/*
			 * GFX pipe 0 queue 0 is used by the kernel queue.
			 * Set GFX pipe 0 queue 1 for MES scheduling
			 * mask = 10b
			 */
			adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
	}

	num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
	if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
		dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
			 num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		if (i >= num_pipes)
			break;
		adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
	}

	num_pipes = adev->sdma.num_instances;
	if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
		dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
			 num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (i >= num_pipes)
			break;
		adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
				 &adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
			      "(%d) query_status_fence_offs wb alloc failed\n",
			      r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

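/**
 * amdgpu_mes_fini - tear down MES state created by amdgpu_mes_init()
 * @adev: amdgpu device pointer
 *
 * (Editorial kernel-doc, not from the original file.)  Releases the event
 * log buffer, the per-pipe writeback slots, the doorbell bitmap and the
 * ID allocators, in roughly the reverse order of amdgpu_mes_init().
 */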
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i;

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				      adev->mes.query_status_fence_offs[i]);
	}

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

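/*
 * Editorial summary (not from the original file): suspending or resuming
 * all gangs is a single firmware call.  The per-gang fields of the input
 * structure stay zeroed and only the suspend_all_gangs/resume_all_gangs
 * flag is set.  Both helpers are no-ops when the firmware is too old; see
 * amdgpu_mes_suspend_resume_all_supported().
 */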
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under the MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to suspend all gangs\n");

	return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under the MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to resume all gangs\n");

	return r;
}

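/*
 * Editorial summary (not from the original file): legacy (kernel) queues
 * keep their MQD in ring->mqd_obj; mapping such a queue through MES hands
 * the firmware the MQD GPU address plus the doorbell/pipe/queue
 * coordinates it needs to take over scheduling.  A ring init path might
 * use it roughly like this (sketch, caller context assumed):
 *
 *	r = amdgpu_mes_map_legacy_queue(adev, ring);
 *	if (r)
 *		return r;
 */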
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid,
				  bool use_mmio)
{
	struct mes_reset_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.me_id = ring->me;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;
	queue_input.use_mmio = use_mmio;
	queue_input.is_kq = true;
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
		queue_input.legacy_gfx = true;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to reset legacy queue\n");

	return r;
}

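/*
 * Editorial summary (not from the original file): register reads can be
 * routed through the MES firmware when direct CPU access is unavailable
 * (e.g. some SR-IOV configurations): a temporary writeback slot gives the
 * firmware a GPU buffer to deposit the value into, a
 * MES_MISC_OP_READ_REG misc op is submitted, and the result is read back
 * from CPU-visible writeback memory.
 */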
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	uint32_t val = 0;
	uint32_t addr_offset = 0;
	uint64_t read_val_gpu_addr;
	uint32_t *read_val_ptr;
	int r;

	if (amdgpu_device_wb_get(adev, &addr_offset)) {
		dev_err(adev->dev, "critical bug! too many mes readers\n");
		goto error;
	}
	read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
	read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes rreg is not supported!\n");
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
	else
		val = *read_val_ptr;

error:
	if (addr_offset)
		amdgpu_device_wb_free(adev, addr_offset);
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to reg_write_reg_wait\n");

error:
	return r;
}

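/*
 * Editorial summary (not from the original file): both shader-debugger
 * helpers below are thin wrappers around the
 * MES_MISC_OP_SET_SHADER_DEBUGGER misc op.  The first programs the debug
 * state (watch registers, per-VMID control, trap enable on MES API v14+);
 * the second only sets process_ctx_flush to tear the state down, which is
 * why the first rejects inputs with that flag already set.
 */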
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				uint64_t process_context_addr,
				uint32_t spi_gdbg_per_vmid_cntl,
				const uint32_t *tcp_watch_cntl,
				uint32_t flags,
				bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
			sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						   enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

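/*
 * Editorial summary (not from the original file): MES firmware naming
 * varies by generation.  Unified MES uses <prefix>_uni_mes.bin; GFX11
 * uses <prefix>_mes_2.bin for the scheduler pipe and <prefix>_mes1.bin
 * for the other (KIQ) pipe, with a fallback to <prefix>_mes.bin for
 * older firmware packages; everything else uses <prefix>_mes.bin and
 * <prefix>_mes1.bin.
 */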
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	u32 *ucode_ptr;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
				 "%s", fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
	ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
			  sizeof(union amdgpu_firmware_header));
	adev->mes.fw_version[pipe] =
		le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

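/*
 * Editorial summary (not from the original file): suspend/resume of all
 * gangs requires a GFX11 MES scheduler firmware at revision 0x63 or
 * newer; every other combination reports the feature as unsupported.
 */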
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
	bool is_supported = false;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
	    mes_rev >= 0x63)
		is_supported = true;

	return is_supported;
}

/* FIXME: node_id will be used to identify the correct MES instance in the future */
static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
					    uint32_t node_id, bool enable)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	op_input.op = MES_MISC_OP_CHANGE_CONFIG;
	op_input.change_config.option.limit_single_process = enable ? 1 : 0;

	if (!adev->mes.funcs->misc_op) {
		dev_err(adev->dev, "mes change config is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		dev_err(adev->dev, "failed to change_config.\n");

error:
	return r;
}

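/*
 * Editorial summary (not from the original file): enforce-isolation is
 * pushed to the firmware per partition; with an XCP manager each
 * partition is visited, otherwise only node 0.  Error codes are OR-ed
 * together, so a single failing partition makes the whole update report
 * failure.
 */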
int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
{
	int i, r = 0;

	if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
		mutex_lock(&adev->enforce_isolation_mutex);
		for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
			if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
				r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
			else
				r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
		}
		mutex_unlock(&adev->enforce_isolation_mutex);
	}
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, adev->mes.event_log_size, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}
704