xref: /linux/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c (revision 90574d2a675947858b47008df8d07f75ea50d0d0)
1 /*
2  * Copyright 2023 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <linux/module.h>
26 #include "amdgpu.h"
27 #include "soc15_common.h"
28 #include "soc21.h"
29 #include "gc/gc_12_0_0_offset.h"
30 #include "gc/gc_12_0_0_sh_mask.h"
31 #include "gc/gc_11_0_0_default.h"
32 #include "v12_structs.h"
33 #include "mes_v12_api_def.h"
34 
35 MODULE_FIRMWARE("amdgpu/gc_12_0_0_mes.bin");
36 MODULE_FIRMWARE("amdgpu/gc_12_0_0_mes1.bin");
37 MODULE_FIRMWARE("amdgpu/gc_12_0_0_uni_mes.bin");
38 MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes.bin");
39 MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes1.bin");
40 MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin");
41 
42 static int mes_v12_0_hw_init(void *handle);
43 static int mes_v12_0_hw_fini(void *handle);
44 static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev);
45 static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
46 
47 #define MES_EOP_SIZE   2048
48 
49 static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)
50 {
51 	struct amdgpu_device *adev = ring->adev;
52 
53 	if (ring->use_doorbell) {
54 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
55 			     ring->wptr);
56 		WDOORBELL64(ring->doorbell_index, ring->wptr);
57 	} else {
58 		BUG();
59 	}
60 }
61 
62 static u64 mes_v12_0_ring_get_rptr(struct amdgpu_ring *ring)
63 {
64 	return *ring->rptr_cpu_addr;
65 }
66 
67 static u64 mes_v12_0_ring_get_wptr(struct amdgpu_ring *ring)
68 {
69 	u64 wptr;
70 
71 	if (ring->use_doorbell)
72 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
73 	else
74 		BUG();
75 	return wptr;
76 }
77 
78 static const struct amdgpu_ring_funcs mes_v12_0_ring_funcs = {
79 	.type = AMDGPU_RING_TYPE_MES,
80 	.align_mask = 1,
81 	.nop = 0,
82 	.support_64bit_ptrs = true,
83 	.get_rptr = mes_v12_0_ring_get_rptr,
84 	.get_wptr = mes_v12_0_ring_get_wptr,
85 	.set_wptr = mes_v12_0_ring_set_wptr,
86 	.insert_nop = amdgpu_ring_insert_nop,
87 };
88 
89 static const char *mes_v12_0_opcodes[] = {
90 	"SET_HW_RSRC",
91 	"SET_SCHEDULING_CONFIG",
92 	"ADD_QUEUE",
93 	"REMOVE_QUEUE",
94 	"PERFORM_YIELD",
95 	"SET_GANG_PRIORITY_LEVEL",
96 	"SUSPEND",
97 	"RESUME",
98 	"RESET",
99 	"SET_LOG_BUFFER",
100 	"CHANGE_GANG_PRIORITY",
101 	"QUERY_SCHEDULER_STATUS",
102 	"unused",
103 	"SET_DEBUG_VMID",
104 	"MISC",
105 	"UPDATE_ROOT_PAGE_TABLE",
106 	"AMD_LOG",
107 	"SET_SE_MODE",
108 	"SET_GANG_SUBMIT",
109 	"SET_HW_RSRC_1",
110 };
111 
112 static const char *mes_v12_0_misc_opcodes[] = {
113 	"WRITE_REG",
114 	"INV_GART",
115 	"QUERY_STATUS",
116 	"READ_REG",
117 	"WAIT_REG_MEM",
118 	"SET_SHADER_DEBUGGER",
119 	"NOTIFY_WORK_ON_UNMAPPED_QUEUE",
120 	"NOTIFY_TO_UNMAP_PROCESSES",
121 };
122 
123 static const char *mes_v12_0_get_op_string(union MESAPI__MISC *x_pkt)
124 {
125 	const char *op_str = NULL;
126 
127 	if (x_pkt->header.opcode < ARRAY_SIZE(mes_v12_0_opcodes))
128 		op_str = mes_v12_0_opcodes[x_pkt->header.opcode];
129 
130 	return op_str;
131 }
132 
133 static const char *mes_v12_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
134 {
135 	const char *op_str = NULL;
136 
137 	if ((x_pkt->header.opcode == MES_SCH_API_MISC) &&
138 	    (x_pkt->opcode < ARRAY_SIZE(mes_v12_0_misc_opcodes)))
139 		op_str = mes_v12_0_misc_opcodes[x_pkt->opcode];
140 
141 	return op_str;
142 }
143 
144 static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
145 						    void *pkt, int size,
146 						    int api_status_off)
147 {
148 	union MESAPI__QUERY_MES_STATUS mes_status_pkt;
149 	signed long timeout = 3000000; /* 3000 ms */
150 	struct amdgpu_device *adev = mes->adev;
151 	struct amdgpu_ring *ring = &mes->ring;
152 	struct MES_API_STATUS *api_status;
153 	union MESAPI__MISC *x_pkt = pkt;
154 	const char *op_str, *misc_op_str;
155 	unsigned long flags;
156 	u64 status_gpu_addr;
157 	u32 status_offset;
158 	u64 *status_ptr;
159 	signed long r;
160 	int ret;
161 
162 	if (x_pkt->header.opcode >= MES_SCH_API_MAX)
163 		return -EINVAL;
164 
165 	if (amdgpu_emu_mode) {
166 		timeout *= 100;
167 	} else if (amdgpu_sriov_vf(adev)) {
168 		/* Worst case in SR-IOV: all other 15 VFs time out first, each needing about 600 ms; timeout counts microseconds */
169 		timeout = 15 * 600 * 1000;
170 	}
171 
172 	ret = amdgpu_device_wb_get(adev, &status_offset);
173 	if (ret)
174 		return ret;
175 
176 	status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
177 	status_ptr = (u64 *)&adev->wb.wb[status_offset];
178 	*status_ptr = 0;
179 
180 	spin_lock_irqsave(&mes->ring_lock, flags);
181 	r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
182 	if (r)
183 		goto error_unlock_free;
184 
185 	api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
186 	api_status->api_completion_fence_addr = status_gpu_addr;
187 	api_status->api_completion_fence_value = 1;
188 
189 	amdgpu_ring_write_multiple(ring, pkt, size / 4);
190 
191 	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
192 	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
193 	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
194 	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
195 	mes_status_pkt.api_status.api_completion_fence_addr =
196 		ring->fence_drv.gpu_addr;
197 	mes_status_pkt.api_status.api_completion_fence_value =
198 		++ring->fence_drv.sync_seq;
199 
200 	amdgpu_ring_write_multiple(ring, &mes_status_pkt,
201 				   sizeof(mes_status_pkt) / 4);
202 
203 	amdgpu_ring_commit(ring);
204 	spin_unlock_irqrestore(&mes->ring_lock, flags);
205 
206 	op_str = mes_v12_0_get_op_string(x_pkt);
207 	misc_op_str = mes_v12_0_get_misc_op_string(x_pkt);
208 
209 	if (misc_op_str)
210 		dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
211 			misc_op_str);
212 	else if (op_str)
213 		dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
214 	else
215 		dev_dbg(adev->dev, "MES msg=%d was emitted\n",
216 			x_pkt->header.opcode);
217 
218 	r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
219 	if (r < 1 || !*status_ptr) {
221 		if (misc_op_str)
222 			dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
223 				op_str, misc_op_str);
224 		else if (op_str)
225 			dev_err(adev->dev, "MES failed to respond to msg=%s\n",
226 				op_str);
227 		else
228 			dev_err(adev->dev, "MES failed to respond to msg=%d\n",
229 				x_pkt->header.opcode);
230 
231 		while (halt_if_hws_hang)
232 			schedule();
233 
234 		r = -ETIMEDOUT;
235 		goto error_wb_free;
236 	}
237 
238 	amdgpu_device_wb_free(adev, status_offset);
239 	return 0;
240 
241 error_unlock_free:
242 	spin_unlock_irqrestore(&mes->ring_lock, flags);
243 
244 error_wb_free:
245 	amdgpu_device_wb_free(adev, status_offset);
246 	return r;
247 }
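
/*
 * Completion-protocol note for the helper above: each submission arms two
 * fences. The caller's packet points its MES_API_STATUS at a scratch
 * writeback slot (status_gpu_addr, expected to become 1), and the trailing
 * QUERY_SCHEDULER_STATUS packet signals the ring's own fence (sync_seq).
 * The wait succeeds only when the ring fence arrives *and* the scratch
 * slot is non-zero, so a MES that consumed the frame but never completed
 * the request still shows up as a timeout.
 */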
248 
249 static int convert_to_mes_queue_type(int queue_type)
250 {
251 	if (queue_type == AMDGPU_RING_TYPE_GFX)
252 		return MES_QUEUE_TYPE_GFX;
253 	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
254 		return MES_QUEUE_TYPE_COMPUTE;
255 	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
256 		return MES_QUEUE_TYPE_SDMA;
257 	else
258 		BUG();
259 	return -1;
260 }
261 
262 static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
263 				  struct mes_add_queue_input *input)
264 {
265 	struct amdgpu_device *adev = mes->adev;
266 	union MESAPI__ADD_QUEUE mes_add_queue_pkt;
267 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
268 	uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;
269 
270 	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));
271 
272 	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
273 	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
274 	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
275 
276 	mes_add_queue_pkt.process_id = input->process_id;
277 	mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr;
278 	mes_add_queue_pkt.process_va_start = input->process_va_start;
279 	mes_add_queue_pkt.process_va_end = input->process_va_end;
280 	mes_add_queue_pkt.process_quantum = input->process_quantum;
281 	mes_add_queue_pkt.process_context_addr = input->process_context_addr;
282 	mes_add_queue_pkt.gang_quantum = input->gang_quantum;
283 	mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
284 	mes_add_queue_pkt.inprocess_gang_priority =
285 		input->inprocess_gang_priority;
286 	mes_add_queue_pkt.gang_global_priority_level =
287 		input->gang_global_priority_level;
288 	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
289 	mes_add_queue_pkt.mqd_addr = input->mqd_addr;
290 
291 	mes_add_queue_pkt.wptr_addr = input->wptr_mc_addr;
292 
293 	mes_add_queue_pkt.queue_type =
294 		convert_to_mes_queue_type(input->queue_type);
295 	mes_add_queue_pkt.paging = input->paging;
296 	mes_add_queue_pkt.vm_context_cntl = vm_cntx_cntl;
297 	mes_add_queue_pkt.gws_base = input->gws_base;
298 	mes_add_queue_pkt.gws_size = input->gws_size;
299 	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
300 	mes_add_queue_pkt.tma_addr = input->tma_addr;
301 	mes_add_queue_pkt.trap_en = input->trap_en;
302 	mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear;
303 	mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
304 
305 	/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
306 	mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
307 	mes_add_queue_pkt.gds_size = input->queue_size;
308 
313 	return mes_v12_0_submit_pkt_and_poll_completion(mes,
314 			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
315 			offsetof(union MESAPI__ADD_QUEUE, api_status));
316 }
317 
318 static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
319 				     struct mes_remove_queue_input *input)
320 {
321 	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
322 
323 	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
324 
325 	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
326 	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
327 	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
328 
329 	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
330 	mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
331 
332 	return mes_v12_0_submit_pkt_and_poll_completion(mes,
333 			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
334 			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
335 }
336 
337 static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
338 				      struct mes_map_legacy_queue_input *input)
339 {
340 	union MESAPI__ADD_QUEUE mes_add_queue_pkt;
341 
342 	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));
343 
344 	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
345 	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
346 	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
347 
348 	mes_add_queue_pkt.pipe_id = input->pipe_id;
349 	mes_add_queue_pkt.queue_id = input->queue_id;
350 	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
351 	mes_add_queue_pkt.mqd_addr = input->mqd_addr;
352 	mes_add_queue_pkt.wptr_addr = input->wptr_addr;
353 	mes_add_queue_pkt.queue_type =
354 		convert_to_mes_queue_type(input->queue_type);
355 	mes_add_queue_pkt.map_legacy_kq = 1;
356 
357 	return mes_v12_0_submit_pkt_and_poll_completion(mes,
358 			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
359 			offsetof(union MESAPI__ADD_QUEUE, api_status));
360 }
361 
362 static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
363 			struct mes_unmap_legacy_queue_input *input)
364 {
365 	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
366 
367 	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
368 
369 	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
370 	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
371 	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
372 
373 	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
374 	mes_remove_queue_pkt.gang_context_addr = 0;
375 
376 	mes_remove_queue_pkt.pipe_id = input->pipe_id;
377 	mes_remove_queue_pkt.queue_id = input->queue_id;
378 
379 	if (input->action == PREEMPT_QUEUES_NO_UNMAP) {
380 		mes_remove_queue_pkt.preempt_legacy_gfx_queue = 1;
381 		mes_remove_queue_pkt.tf_addr = input->trail_fence_addr;
382 		mes_remove_queue_pkt.tf_data =
383 			lower_32_bits(input->trail_fence_data);
384 	} else {
385 		mes_remove_queue_pkt.unmap_legacy_queue = 1;
386 		mes_remove_queue_pkt.queue_type =
387 			convert_to_mes_queue_type(input->queue_type);
388 	}
389 
390 	return mes_v12_0_submit_pkt_and_poll_completion(mes,
391 			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
392 			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
393 }
394 
395 static int mes_v12_0_suspend_gang(struct amdgpu_mes *mes,
396 				  struct mes_suspend_gang_input *input)
397 {
398 	return 0;
399 }
400 
401 static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
402 				 struct mes_resume_gang_input *input)
403 {
404 	return 0;
405 }
406 
407 static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes)
408 {
409 	union MESAPI__QUERY_MES_STATUS mes_status_pkt;
410 
411 	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
412 
413 	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
414 	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
415 	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
416 
417 	return mes_v12_0_submit_pkt_and_poll_completion(mes,
418 			&mes_status_pkt, sizeof(mes_status_pkt),
419 			offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
420 }
421 
422 static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
423 			     struct mes_misc_op_input *input)
424 {
425 	union MESAPI__MISC misc_pkt;
426 
427 	memset(&misc_pkt, 0, sizeof(misc_pkt));
428 
429 	misc_pkt.header.type = MES_API_TYPE_SCHEDULER;
430 	misc_pkt.header.opcode = MES_SCH_API_MISC;
431 	misc_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
432 
433 	switch (input->op) {
434 	case MES_MISC_OP_READ_REG:
435 		misc_pkt.opcode = MESAPI_MISC__READ_REG;
436 		misc_pkt.read_reg.reg_offset = input->read_reg.reg_offset;
437 		misc_pkt.read_reg.buffer_addr = input->read_reg.buffer_addr;
438 		break;
439 	case MES_MISC_OP_WRITE_REG:
440 		misc_pkt.opcode = MESAPI_MISC__WRITE_REG;
441 		misc_pkt.write_reg.reg_offset = input->write_reg.reg_offset;
442 		misc_pkt.write_reg.reg_value = input->write_reg.reg_value;
443 		break;
444 	case MES_MISC_OP_WRM_REG_WAIT:
445 		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
446 		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WAIT_REG_MEM;
447 		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
448 		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
449 		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
450 		misc_pkt.wait_reg_mem.reg_offset2 = 0;
451 		break;
452 	case MES_MISC_OP_WRM_REG_WR_WAIT:
453 		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
454 		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WR_WAIT_WR_REG;
455 		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
456 		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
457 		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
458 		misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1;
459 		break;
460 	case MES_MISC_OP_SET_SHADER_DEBUGGER:
461 		misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
462 		misc_pkt.set_shader_debugger.process_context_addr =
463 				input->set_shader_debugger.process_context_addr;
464 		misc_pkt.set_shader_debugger.flags.u32all =
465 				input->set_shader_debugger.flags.u32all;
466 		misc_pkt.set_shader_debugger.spi_gdbg_per_vmid_cntl =
467 				input->set_shader_debugger.spi_gdbg_per_vmid_cntl;
468 		memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl,
469 				input->set_shader_debugger.tcp_watch_cntl,
470 				sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
471 		misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
472 		break;
473 	default:
474 		DRM_ERROR("unsupported misc op (%d)\n", input->op);
475 		return -EINVAL;
476 	}
477 
478 	return mes_v12_0_submit_pkt_and_poll_completion(mes,
479 			&misc_pkt, sizeof(misc_pkt),
480 			offsetof(union MESAPI__MISC, api_status));
481 }
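
/*
 * Usage sketch for the misc op above (illustrative only; reg_offset and
 * ref_value are placeholder names, not taken from a real caller). Waiting
 * for a register to reach a value through MES looks like:
 *
 *	struct mes_misc_op_input op = {0};
 *
 *	op.op = MES_MISC_OP_WRM_REG_WAIT;
 *	op.wrm_reg.reg0 = reg_offset;	(register to poll)
 *	op.wrm_reg.ref = ref_value;	(value to wait for)
 *	op.wrm_reg.mask = 0xffffffff;	(compare all bits)
 *	r = adev->mes.funcs->misc_op(&adev->mes, &op);
 */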
482 
483 static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes)
484 {
485 	union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_1_pkt;
486 
487 	memset(&mes_set_hw_res_1_pkt, 0, sizeof(mes_set_hw_res_1_pkt));
488 
489 	mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER;
490 	mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
491 	mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
492 	mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
493 
494 	return mes_v12_0_submit_pkt_and_poll_completion(mes,
495 			&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
496 			offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
497 }
498 
499 static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
500 {
501 	int i;
502 	struct amdgpu_device *adev = mes->adev;
503 	union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;
504 
505 	memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
506 
507 	mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
508 	mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
509 	mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
510 
511 	mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
512 	mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
513 	mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
514 	mes_set_hw_res_pkt.paging_vmid = 0;
515 	mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
516 	mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
517 		mes->query_status_fence_gpu_addr;
518 
519 	for (i = 0; i < MAX_COMPUTE_PIPES; i++)
520 		mes_set_hw_res_pkt.compute_hqd_mask[i] =
521 			mes->compute_hqd_mask[i];
522 
523 	for (i = 0; i < MAX_GFX_PIPES; i++)
524 		mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
525 
526 	for (i = 0; i < MAX_SDMA_PIPES; i++)
527 		mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
528 
529 	for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
530 		mes_set_hw_res_pkt.aggregated_doorbells[i] =
531 			mes->aggregated_doorbells[i];
532 
533 	for (i = 0; i < 5; i++) {
534 		mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
535 		mes_set_hw_res_pkt.mmhub_base[i] =
536 				adev->reg_offset[MMHUB_HWIP][0][i];
537 		mes_set_hw_res_pkt.osssys_base[i] =
538 				adev->reg_offset[OSSSYS_HWIP][0][i];
539 	}
540 
541 	mes_set_hw_res_pkt.disable_reset = 1;
542 	mes_set_hw_res_pkt.disable_mes_log = 1;
543 	mes_set_hw_res_pkt.use_different_vmid_compute = 1;
544 	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
545 
546 	/*
547 	 * Keep the oversubscription timer for SDMA; once unmapped doorbell
548 	 * handling is supported, other queues will not use it. Unmapped
549 	 * doorbell handling mode - 0: disabled; 1: basic; 2: basic+.
550 	 */
551 	mes_set_hw_res_pkt.oversubscription_timer = 50;
552 	mes_set_hw_res_pkt.unmapped_doorbell_handling = 1;
553 
554 	if (amdgpu_mes_log_enable) {
555 		mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
556 		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
557 	}
558 
559 	return mes_v12_0_submit_pkt_and_poll_completion(mes,
560 			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
561 			offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
562 }
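
/*
 * Note on the *_hqd_mask arrays above (per-bit semantics assumed): each
 * set bit donates one hardware queue descriptor on that pipe to the MES
 * scheduler, e.g.
 *
 *	mes->compute_hqd_mask[i] = 0xc;	(HQDs 2-3 MES-managed, HQDs 0-1
 *					 left to the driver)
 */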
563 
564 static void mes_v12_0_init_aggregated_doorbell(struct amdgpu_mes *mes)
565 {
566 	struct amdgpu_device *adev = mes->adev;
567 	uint32_t data;
568 
569 	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1);
570 	data &= ~(CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK |
571 		  CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK |
572 		  CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK);
573 	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] <<
574 		CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT;
575 	data |= 1 << CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT;
576 	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1, data);
577 
578 	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2);
579 	data &= ~(CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK |
580 		  CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK |
581 		  CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK);
582 	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] <<
583 		CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT;
584 	data |= 1 << CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT;
585 	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2, data);
586 
587 	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3);
588 	data &= ~(CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK |
589 		  CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK |
590 		  CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK);
591 	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] <<
592 		CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT;
593 	data |= 1 << CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT;
594 	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3, data);
595 
596 	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4);
597 	data &= ~(CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK |
598 		  CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK |
599 		  CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK);
600 	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] <<
601 		CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT;
602 	data |= 1 << CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT;
603 	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4, data);
604 
605 	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5);
606 	data &= ~(CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK |
607 		  CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK |
608 		  CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK);
609 	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] <<
610 		CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT;
611 	data |= 1 << CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT;
612 	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5, data);
613 
614 	data = 1 << CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT;
615 	WREG32_SOC15(GC, 0, regCP_HQD_GFX_CONTROL, data);
616 }
617 
619 static void mes_v12_0_enable_unmapped_doorbell_handling(
620 		struct amdgpu_mes *mes, bool enable)
621 {
622 	struct amdgpu_device *adev = mes->adev;
623 	uint32_t data = RREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL);
624 
625 	/*
626 	 * The default PROC_LSB setting is 0xc, which means doorbell
627 	 * addr[16:12] gives the doorbell page number. For KFD, each
628 	 * process uses two pages of doorbells, so the setting needs
629 	 * to change to 0xd.
630 	 */
631 	data &= ~CP_UNMAPPED_DOORBELL__PROC_LSB_MASK;
632 	data |= 0xd << CP_UNMAPPED_DOORBELL__PROC_LSB__SHIFT;
633 
634 	data |= (enable ? 1 : 0) << CP_UNMAPPED_DOORBELL__ENABLE__SHIFT;
635 
636 	WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
637 }
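
/*
 * Worked example for the PROC_LSB change above (field split assumed from
 * the comment inside the function): with PROC_LSB = 0xc the process
 * number is doorbell addr[16:12], i.e. one 4K doorbell page per process.
 * Bumping it to 0xd makes the field addr[16:13], so each process owns
 * 2 * 4096 = 8192 bytes of doorbell space, matching KFD's two doorbell
 * pages per process.
 */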
638 
639 static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
640 	.add_hw_queue = mes_v12_0_add_hw_queue,
641 	.remove_hw_queue = mes_v12_0_remove_hw_queue,
642 	.map_legacy_queue = mes_v12_0_map_legacy_queue,
643 	.unmap_legacy_queue = mes_v12_0_unmap_legacy_queue,
644 	.suspend_gang = mes_v12_0_suspend_gang,
645 	.resume_gang = mes_v12_0_resume_gang,
646 	.misc_op = mes_v12_0_misc_op,
647 };
648 
649 static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
650 					   enum admgpu_mes_pipe pipe)
651 {
652 	int r;
653 	const struct mes_firmware_header_v1_0 *mes_hdr;
654 	const __le32 *fw_data;
655 	unsigned fw_size;
656 
657 	mes_hdr = (const struct mes_firmware_header_v1_0 *)
658 		adev->mes.fw[pipe]->data;
659 
660 	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
661 		   le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
662 	fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
663 
664 	r = amdgpu_bo_create_reserved(adev, fw_size,
665 				      PAGE_SIZE,
666 				      AMDGPU_GEM_DOMAIN_VRAM,
667 				      &adev->mes.ucode_fw_obj[pipe],
668 				      &adev->mes.ucode_fw_gpu_addr[pipe],
669 				      (void **)&adev->mes.ucode_fw_ptr[pipe]);
670 	if (r) {
671 		dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
672 		return r;
673 	}
674 
675 	memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size);
676 
677 	amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]);
678 	amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]);
679 
680 	return 0;
681 }
682 
683 static int mes_v12_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
684 						enum admgpu_mes_pipe pipe)
685 {
686 	int r;
687 	const struct mes_firmware_header_v1_0 *mes_hdr;
688 	const __le32 *fw_data;
689 	unsigned fw_size;
690 
691 	mes_hdr = (const struct mes_firmware_header_v1_0 *)
692 		adev->mes.fw[pipe]->data;
693 
694 	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
695 		   le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
696 	fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
697 
698 	r = amdgpu_bo_create_reserved(adev, fw_size,
699 				      64 * 1024,
700 				      AMDGPU_GEM_DOMAIN_VRAM,
701 				      &adev->mes.data_fw_obj[pipe],
702 				      &adev->mes.data_fw_gpu_addr[pipe],
703 				      (void **)&adev->mes.data_fw_ptr[pipe]);
704 	if (r) {
705 		dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
706 		return r;
707 	}
708 
709 	memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size);
710 
711 	amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]);
712 	amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]);
713 
714 	return 0;
715 }
716 
717 static void mes_v12_0_free_ucode_buffers(struct amdgpu_device *adev,
718 					 enum admgpu_mes_pipe pipe)
719 {
720 	amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
721 			      &adev->mes.data_fw_gpu_addr[pipe],
722 			      (void **)&adev->mes.data_fw_ptr[pipe]);
723 
724 	amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe],
725 			      &adev->mes.ucode_fw_gpu_addr[pipe],
726 			      (void **)&adev->mes.ucode_fw_ptr[pipe]);
727 }
728 
729 static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
730 {
731 	uint64_t ucode_addr;
732 	uint32_t pipe, data = 0;
733 
734 	if (enable) {
735 		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
736 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
737 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
738 		       (!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
739 		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
740 
741 		mutex_lock(&adev->srbm_mutex);
742 		for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
743 			if ((!adev->enable_mes_kiq || adev->enable_uni_mes) &&
744 			    pipe == AMDGPU_MES_KIQ_PIPE)
745 				continue;
746 
747 			soc21_grbm_select(adev, 3, pipe, 0, 0);
748 
749 			ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
750 			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
751 				     lower_32_bits(ucode_addr));
752 			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
753 				     upper_32_bits(ucode_addr));
754 		}
755 		soc21_grbm_select(adev, 0, 0, 0, 0);
756 		mutex_unlock(&adev->srbm_mutex);
757 
758 		/* unhalt MES and activate pipe0 */
759 		data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
760 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
761 		       (!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
762 		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
763 
764 		if (amdgpu_emu_mode)
765 			msleep(100);
766 		else if (adev->enable_uni_mes)
767 			udelay(500);
768 		else
769 			udelay(50);
770 	} else {
771 		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
772 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
773 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 0);
774 		data = REG_SET_FIELD(data, CP_MES_CNTL,
775 				     MES_INVALIDATE_ICACHE, 1);
776 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
777 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
778 		       (!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
779 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
780 		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
781 	}
782 }
783 
784 static void mes_v12_0_set_ucode_start_addr(struct amdgpu_device *adev)
785 {
786 	uint64_t ucode_addr;
787 	int pipe;
788 
789 	mes_v12_0_enable(adev, false);
790 
791 	mutex_lock(&adev->srbm_mutex);
792 	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
793 		if ((!adev->enable_mes_kiq || adev->enable_uni_mes) &&
794 		    pipe == AMDGPU_MES_KIQ_PIPE)
795 			continue;
796 
797 		/* me=3, queue=0 */
798 		soc21_grbm_select(adev, 3, pipe, 0, 0);
799 
800 		/* set ucode start address */
801 		ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
802 		WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
803 				lower_32_bits(ucode_addr));
804 		WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
805 				upper_32_bits(ucode_addr));
806 
807 		soc21_grbm_select(adev, 0, 0, 0, 0);
808 	}
809 	mutex_unlock(&adev->srbm_mutex);
810 }
811 
812 /* This function is for backdoor loading of the MES firmware */
813 static int mes_v12_0_load_microcode(struct amdgpu_device *adev,
814 				    enum admgpu_mes_pipe pipe, bool prime_icache)
815 {
816 	int r;
817 	uint32_t data;
818 
819 	mes_v12_0_enable(adev, false);
820 
821 	if (!adev->mes.fw[pipe])
822 		return -EINVAL;
823 
824 	r = mes_v12_0_allocate_ucode_buffer(adev, pipe);
825 	if (r)
826 		return r;
827 
828 	r = mes_v12_0_allocate_ucode_data_buffer(adev, pipe);
829 	if (r) {
830 		mes_v12_0_free_ucode_buffers(adev, pipe);
831 		return r;
832 	}
833 
834 	mutex_lock(&adev->srbm_mutex);
835 	/* me=3, queue=0 */
836 	soc21_grbm_select(adev, 3, pipe, 0, 0);
837 
838 	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_CNTL, 0);
839 
840 	/* set ucode firmware address */
841 	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_LO,
842 		     lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
843 	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_HI,
844 		     upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
845 
846 	/* set ucode instruction cache boundary to 2M-1 */
847 	WREG32_SOC15(GC, 0, regCP_MES_MIBOUND_LO, 0x1FFFFF);
848 
849 	/* set ucode data firmware address */
850 	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_LO,
851 		     lower_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
852 	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI,
853 		     upper_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
854 
855 	/* set ucode data cache boundary to 512K-1 (CP_MES_MDBOUND_LO) */
856 	WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x7FFFF);
857 
858 	if (prime_icache) {
859 		/* invalidate ICACHE */
860 		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
861 		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
862 		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
863 		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
864 
865 		/* prime the ICACHE. */
866 		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
867 		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
868 		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
869 	}
870 
871 	soc21_grbm_select(adev, 0, 0, 0, 0);
872 	mutex_unlock(&adev->srbm_mutex);
873 
874 	return 0;
875 }
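
/*
 * Context note (assumed from the load_type checks elsewhere in this
 * file): "backdoor" loading means the driver copies the MES ucode and
 * data images into VRAM itself and points the CP instruction/data caches
 * at them via CP_MES_IC_BASE/CP_MES_MDBASE, as done above. It is only
 * used for AMDGPU_FW_LOAD_DIRECT; otherwise PSP loads the firmware
 * through the front door.
 */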
876 
877 static int mes_v12_0_allocate_eop_buf(struct amdgpu_device *adev,
878 				      enum admgpu_mes_pipe pipe)
879 {
880 	int r;
881 	u32 *eop;
882 
883 	r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
884 			      AMDGPU_GEM_DOMAIN_GTT,
885 			      &adev->mes.eop_gpu_obj[pipe],
886 			      &adev->mes.eop_gpu_addr[pipe],
887 			      (void **)&eop);
888 	if (r) {
889 		dev_warn(adev->dev, "(%d) create EOP bo failed\n", r);
890 		return r;
891 	}
892 
893 	memset(eop, 0,
894 	       adev->mes.eop_gpu_obj[pipe]->tbo.base.size);
895 
896 	amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]);
897 	amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]);
898 
899 	return 0;
900 }
901 
902 static int mes_v12_0_mqd_init(struct amdgpu_ring *ring)
903 {
904 	struct v12_compute_mqd *mqd = ring->mqd_ptr;
905 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
906 	uint32_t tmp;
907 
908 	mqd->header = 0xC0310800;
909 	mqd->compute_pipelinestat_enable = 0x00000001;
910 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
911 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
912 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
913 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
914 	mqd->compute_misc_reserved = 0x00000007;
915 
916 	eop_base_addr = ring->eop_gpu_addr >> 8;
917 
918 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
919 	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
920 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
921 			(order_base_2(MES_EOP_SIZE / 4) - 1));
922 
923 	mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr);
924 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
925 	mqd->cp_hqd_eop_control = tmp;
926 
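	/*
	 * Sanity check of the EOP_SIZE encoding above, using
	 * MES_EOP_SIZE = 2048: 2048 bytes / 4 = 512 dwords,
	 * order_base_2(512) - 1 = 8, and the hardware expands the field
	 * back as 2^(8+1) = 512 dwords, so the register matches the
	 * buffer exactly.
	 */
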
927 	/* disable the queue if it's active */
928 	ring->wptr = 0;
929 	mqd->cp_hqd_pq_rptr = 0;
930 	mqd->cp_hqd_pq_wptr_lo = 0;
931 	mqd->cp_hqd_pq_wptr_hi = 0;
932 
933 	/* set the pointer to the MQD */
934 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
935 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
936 
937 	/* set MQD vmid to 0 */
938 	tmp = regCP_MQD_CONTROL_DEFAULT;
939 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
940 	mqd->cp_mqd_control = tmp;
941 
942 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
943 	hqd_gpu_addr = ring->gpu_addr >> 8;
944 	mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr);
945 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
946 
947 	/* set the wb address whether it's enabled or not */
948 	wb_gpu_addr = ring->rptr_gpu_addr;
949 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
950 	mqd->cp_hqd_pq_rptr_report_addr_hi =
951 		upper_32_bits(wb_gpu_addr) & 0xffff;
952 
953 	/* only used if CP_PQ_WPTR_POLL_CNTL.EN is set */
954 	wb_gpu_addr = ring->wptr_gpu_addr;
955 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
956 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
957 
958 	/* set up the HQD, this is similar to CP_RB0_CNTL */
959 	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
960 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
961 			    (order_base_2(ring->ring_size / 4) - 1));
962 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
963 			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
964 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
965 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
966 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
967 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
968 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1);
969 	mqd->cp_hqd_pq_control = tmp;
970 
971 	/* enable doorbell */
972 	tmp = 0;
973 	if (ring->use_doorbell) {
974 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
975 				    DOORBELL_OFFSET, ring->doorbell_index);
976 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
977 				    DOORBELL_EN, 1);
978 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
979 				    DOORBELL_SOURCE, 0);
980 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
981 				    DOORBELL_HIT, 0);
982 	} else {
983 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
984 				    DOORBELL_EN, 0);
985 	}
986 	mqd->cp_hqd_pq_doorbell_control = tmp;
987 
988 	mqd->cp_hqd_vmid = 0;
989 	/* activate the queue */
990 	mqd->cp_hqd_active = 1;
991 
992 	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
993 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE,
994 			    PRELOAD_SIZE, 0x55);
995 	mqd->cp_hqd_persistent_state = tmp;
996 
997 	mqd->cp_hqd_ib_control = regCP_HQD_IB_CONTROL_DEFAULT;
998 	mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT;
999 	mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT;
1000 
1001 	/*
1002 	 * Set CP_HQD_GFX_CONTROL.DB_UPDATED_MSG_EN[15] to enable unmapped
1003 	 * doorbell handling. This is a reserved CP internal register that
1004 	 * cannot be accessed by others.
1005 	 */
1006 	mqd->reserved_184 = BIT(15);
1007 
1008 	return 0;
1009 }
1010 
1011 static void mes_v12_0_queue_init_register(struct amdgpu_ring *ring)
1012 {
1013 	struct v12_compute_mqd *mqd = ring->mqd_ptr;
1014 	struct amdgpu_device *adev = ring->adev;
1015 	uint32_t data = 0;
1016 
1017 	mutex_lock(&adev->srbm_mutex);
1018 	soc21_grbm_select(adev, 3, ring->pipe, 0, 0);
1019 
1020 	/* set CP_HQD_VMID.VMID = 0. */
1021 	data = RREG32_SOC15(GC, 0, regCP_HQD_VMID);
1022 	data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0);
1023 	WREG32_SOC15(GC, 0, regCP_HQD_VMID, data);
1024 
1025 	/* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */
1026 	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
1027 	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
1028 			     DOORBELL_EN, 0);
1029 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);
1030 
1031 	/* set CP_MQD_BASE_ADDR/HI with the MQD base address */
1032 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
1033 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
1034 
1035 	/* set CP_MQD_CONTROL.VMID=0 */
1036 	data = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
1037 	data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0);
1038 	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, data);
1039 
1040 	/* set CP_HQD_PQ_BASE/HI with the ring buffer base address */
1041 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
1042 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
1043 
1044 	/* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */
1045 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
1046 		     mqd->cp_hqd_pq_rptr_report_addr_lo);
1047 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
1048 		     mqd->cp_hqd_pq_rptr_report_addr_hi);
1049 
1050 	/* set CP_HQD_PQ_CONTROL */
1051 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);
1052 
1053 	/* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */
1054 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
1055 		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
1056 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
1057 		     mqd->cp_hqd_pq_wptr_poll_addr_hi);
1058 
1059 	/* set CP_HQD_PQ_DOORBELL_CONTROL */
1060 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
1061 		     mqd->cp_hqd_pq_doorbell_control);
1062 
1063 	/* set CP_HQD_PERSISTENT_STATE.PRELOAD_SIZE=0x55 */
1064 	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);
1065 
1066 	/* set CP_HQD_ACTIVE.ACTIVE=1 */
1067 	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, mqd->cp_hqd_active);
1068 
1069 	soc21_grbm_select(adev, 0, 0, 0, 0);
1070 	mutex_unlock(&adev->srbm_mutex);
1071 }
1072 
1073 static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev)
1074 {
1075 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
1076 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
1077 	int r;
1078 
1079 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
1080 		return -EINVAL;
1081 
1082 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
1083 	if (r) {
1084 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
1085 		return r;
1086 	}
1087 
1088 	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
1089 
1090 	r = amdgpu_ring_test_ring(kiq_ring);
1091 	if (r) {
1092 		DRM_ERROR("kiq enable failed\n");
1093 		kiq_ring->sched.ready = false;
1094 	}
1095 	return r;
1096 }
1097 
1098 static int mes_v12_0_queue_init(struct amdgpu_device *adev,
1099 				enum admgpu_mes_pipe pipe)
1100 {
1101 	struct amdgpu_ring *ring;
1102 	int r;
1103 
1104 	if (pipe == AMDGPU_MES_KIQ_PIPE)
1105 		ring = &adev->gfx.kiq[0].ring;
1106 	else if (pipe == AMDGPU_MES_SCHED_PIPE)
1107 		ring = &adev->mes.ring;
1108 	else
1109 		BUG();
1110 
1111 	if ((pipe == AMDGPU_MES_SCHED_PIPE) &&
1112 	    (amdgpu_in_reset(adev) || adev->in_suspend)) {
1113 		*(ring->wptr_cpu_addr) = 0;
1114 		*(ring->rptr_cpu_addr) = 0;
1115 		amdgpu_ring_clear_ring(ring);
1116 	}
1117 
1118 	r = mes_v12_0_mqd_init(ring);
1119 	if (r)
1120 		return r;
1121 
1122 	if (pipe == AMDGPU_MES_SCHED_PIPE) {
1123 		if (adev->enable_uni_mes) {
1124 			mes_v12_0_queue_init_register(ring);
1125 		} else {
1126 			r = mes_v12_0_kiq_enable_queue(adev);
1127 			if (r)
1128 				return r;
1129 		}
1130 	} else {
1131 		mes_v12_0_queue_init_register(ring);
1132 	}
1133 
1134 	/* get MES scheduler/KIQ versions */
1135 	mutex_lock(&adev->srbm_mutex);
1136 	soc21_grbm_select(adev, 3, pipe, 0, 0);
1137 
1138 	if (pipe == AMDGPU_MES_SCHED_PIPE)
1139 		adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
1140 	else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
1141 		adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
1142 
1143 	soc21_grbm_select(adev, 0, 0, 0, 0);
1144 	mutex_unlock(&adev->srbm_mutex);
1145 
1146 	return 0;
1147 }
1148 
1149 static int mes_v12_0_ring_init(struct amdgpu_device *adev)
1150 {
1151 	struct amdgpu_ring *ring;
1152 
1153 	ring = &adev->mes.ring;
1154 
1155 	ring->funcs = &mes_v12_0_ring_funcs;
1156 
1157 	ring->me = 3;
1158 	ring->pipe = 0;
1159 	ring->queue = 0;
1160 
1161 	ring->ring_obj = NULL;
1162 	ring->use_doorbell = true;
1163 	ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
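	/*
	 * The << 1 above converts the qword doorbell slot index into the
	 * dword-based index used by the doorbell helpers: each 64-bit MES
	 * doorbell spans two dwords (assumed convention, matching other
	 * amdgpu rings).
	 */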
1164 	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
1165 	ring->no_scheduler = true;
1166 	sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1167 
1168 	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
1169 				AMDGPU_RING_PRIO_DEFAULT, NULL);
1170 }
1171 
1172 static int mes_v12_0_kiq_ring_init(struct amdgpu_device *adev)
1173 {
1174 	struct amdgpu_ring *ring;
1175 
1176 	spin_lock_init(&adev->gfx.kiq[0].ring_lock);
1177 
1178 	ring = &adev->gfx.kiq[0].ring;
1179 
1180 	ring->me = 3;
1181 	ring->pipe = adev->enable_uni_mes ? 0 : 1;
1182 	ring->queue = 0;
1183 
1184 	ring->adev = NULL;
1185 	ring->ring_obj = NULL;
1186 	ring->use_doorbell = true;
1187 	ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;
1188 	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE];
1189 	ring->no_scheduler = true;
1190 	sprintf(ring->name, "mes_kiq_%d.%d.%d",
1191 		ring->me, ring->pipe, ring->queue);
1192 
1193 	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
1194 				AMDGPU_RING_PRIO_DEFAULT, NULL);
1195 }
1196 
1197 static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
1198 				 enum admgpu_mes_pipe pipe)
1199 {
1200 	int r, mqd_size = sizeof(struct v12_compute_mqd);
1201 	struct amdgpu_ring *ring;
1202 
1203 	if (pipe == AMDGPU_MES_KIQ_PIPE)
1204 		ring = &adev->gfx.kiq[0].ring;
1205 	else if (pipe == AMDGPU_MES_SCHED_PIPE)
1206 		ring = &adev->mes.ring;
1207 	else
1208 		BUG();
1209 
1210 	if (ring->mqd_obj)
1211 		return 0;
1212 
1213 	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
1214 				    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
1215 				    &ring->mqd_gpu_addr, &ring->mqd_ptr);
1216 	if (r) {
1217 		dev_warn(adev->dev, "failed to create ring mqd bo (%d)\n", r);
1218 		return r;
1219 	}
1220 
1221 	memset(ring->mqd_ptr, 0, mqd_size);
1222 
1223 	/* prepare MQD backup */
1224 	adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
1225 	if (!adev->mes.mqd_backup[pipe])
1226 		dev_warn(adev->dev,
1227 			 "no memory to create MQD backup for ring %s\n",
1228 			 ring->name);
1229 
1230 	return 0;
1231 }
1232 
1233 static int mes_v12_0_sw_init(void *handle)
1234 {
1235 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1236 	int pipe, r;
1237 
1238 	adev->mes.funcs = &mes_v12_0_funcs;
1239 	adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
1240 	adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
1241 
1242 	adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
1243 
1244 	r = amdgpu_mes_init(adev);
1245 	if (r)
1246 		return r;
1247 
1248 	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
1249 		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
1250 			continue;
1251 
1252 		r = mes_v12_0_allocate_eop_buf(adev, pipe);
1253 		if (r)
1254 			return r;
1255 
1256 		r = mes_v12_0_mqd_sw_init(adev, pipe);
1257 		if (r)
1258 			return r;
1259 	}
1260 
1261 	if (adev->enable_mes_kiq) {
1262 		r = mes_v12_0_kiq_ring_init(adev);
1263 		if (r)
1264 			return r;
1265 	}
1266 
1267 	r = mes_v12_0_ring_init(adev);
1268 	if (r)
1269 		return r;
1270 
1271 	return 0;
1272 }
1273 
1274 static int mes_v12_0_sw_fini(void *handle)
1275 {
1276 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1277 	int pipe;
1278 
1279 	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
1280 	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
1281 
1282 	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
1283 		kfree(adev->mes.mqd_backup[pipe]);
1284 
1285 		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
1286 				      &adev->mes.eop_gpu_addr[pipe],
1287 				      NULL);
1288 		amdgpu_ucode_release(&adev->mes.fw[pipe]);
1289 	}
1290 
1291 	amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
1292 			      &adev->gfx.kiq[0].ring.mqd_gpu_addr,
1293 			      &adev->gfx.kiq[0].ring.mqd_ptr);
1294 
1295 	amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
1296 			      &adev->mes.ring.mqd_gpu_addr,
1297 			      &adev->mes.ring.mqd_ptr);
1298 
1299 	amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
1300 	amdgpu_ring_fini(&adev->mes.ring);
1301 
1302 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1303 		mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
1304 		mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_SCHED_PIPE);
1305 	}
1306 
1307 	amdgpu_mes_fini(adev);
1308 	return 0;
1309 }
1310 
1311 static void mes_v12_0_kiq_dequeue_sched(struct amdgpu_device *adev)
1312 {
1313 	uint32_t data;
1314 	int i;
1315 
1316 	mutex_lock(&adev->srbm_mutex);
1317 	soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0);
1318 
1319 	/* disable the queue if it's active */
1320 	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
1321 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
1322 		for (i = 0; i < adev->usec_timeout; i++) {
1323 			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
1324 				break;
1325 			udelay(1);
1326 		}
1327 	}
1328 	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
1329 	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
1330 				DOORBELL_EN, 0);
1331 	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
1332 				DOORBELL_HIT, 1);
1333 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);
1334 
1335 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);
1336 
1337 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
1338 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
1339 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);
1340 
1341 	soc21_grbm_select(adev, 0, 0, 0, 0);
1342 	mutex_unlock(&adev->srbm_mutex);
1343 
1344 	adev->mes.ring.sched.ready = false;
1345 }
1346 
1347 static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
1348 {
1349 	uint32_t tmp;
1350 	struct amdgpu_device *adev = ring->adev;
1351 
1352 	/* tell RLC which queue is the KIQ */
1353 	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
1354 	tmp &= 0xffffff00;
1355 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1356 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
1357 	tmp |= 0x80;
1358 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
1359 }
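
/*
 * Bit-layout sketch for the writes above (encoding assumed from the
 * shifts): RLC_CP_SCHEDULERS[7:0] = active (bit 7) | me << 5 |
 * pipe << 3 | queue. The register is written twice so that the queue
 * selection lands before the active bit (0x80) is raised.
 */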
1360 
1361 static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
1362 {
1363 	int r = 0;
1364 
1365 	mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring);
1366 
1367 	if (adev->enable_uni_mes)
1368 		return mes_v12_0_hw_init(adev);
1369 
1370 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1372 		r = mes_v12_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
1373 		if (r) {
1374 			DRM_ERROR("failed to load MES fw, r=%d\n", r);
1375 			return r;
1376 		}
1377 
1378 		r = mes_v12_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
1379 		if (r) {
1380 			DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
1381 			return r;
1382 		}
1383 
1384 		mes_v12_0_set_ucode_start_addr(adev);
1386 	} else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1387 		mes_v12_0_set_ucode_start_addr(adev);
1388 
1389 	mes_v12_0_enable(adev, true);
1390 
1391 	r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
1392 	if (r)
1393 		goto failure;
1394 
1395 	r = mes_v12_0_hw_init(adev);
1396 	if (r)
1397 		goto failure;
1398 
1399 	return r;
1400 
1401 failure:
1402 	mes_v12_0_hw_fini(adev);
1403 	return r;
1404 }
1405 
1406 static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev)
1407 {
1408 	if (adev->mes.ring.sched.ready) {
1409 		mes_v12_0_kiq_dequeue_sched(adev);
1410 		adev->mes.ring.sched.ready = false;
1411 	}
1412 
1413 	mes_v12_0_enable(adev, false);
1414 
1415 	return 0;
1416 }
1417 
1418 static int mes_v12_0_hw_init(void *handle)
1419 {
1420 	int r;
1421 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1422 
1423 	if (adev->mes.ring.sched.ready)
1424 		goto out;
1425 
1426 	if (!adev->enable_mes_kiq || adev->enable_uni_mes) {
1427 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1428 			r = mes_v12_0_load_microcode(adev,
1429 					     AMDGPU_MES_SCHED_PIPE, true);
1430 			if (r) {
1431 				DRM_ERROR("failed to load MES fw, r=%d\n", r);
1432 				return r;
1433 			}
1434 
1435 			mes_v12_0_set_ucode_start_addr(adev);
1436 
1437 		} else if (adev->firmware.load_type ==
1438 			   AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1440 			mes_v12_0_set_ucode_start_addr(adev);
1441 		}
1442 
1443 		mes_v12_0_enable(adev, true);
1444 	}
1445 
1446 	r = mes_v12_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
1447 	if (r)
1448 		goto failure;
1449 
1450 	r = mes_v12_0_set_hw_resources(&adev->mes);
1451 	if (r)
1452 		goto failure;
1453 
1454 	if (adev->enable_uni_mes)
1455 		mes_v12_0_set_hw_resources_1(&adev->mes);
1456 
1457 	mes_v12_0_init_aggregated_doorbell(&adev->mes);
1458 
1459 	/* Enable the MES to handle doorbell rings on unmapped queues */
1460 	mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true);
1461 
1462 	r = mes_v12_0_query_sched_status(&adev->mes);
1463 	if (r) {
1464 		DRM_ERROR("MES is busy\n");
1465 		goto failure;
1466 	}
1467 
1468 out:
1469 	/*
1470 	 * Disable KIQ ring usage from the driver once MES is enabled.
1471 	 * MES uses KIQ ring exclusively so driver cannot access KIQ ring
1472 	 * with MES enabled.
1473 	 */
1474 	adev->gfx.kiq[0].ring.sched.ready = false;
1475 	adev->mes.ring.sched.ready = true;
1476 
1477 	return 0;
1478 
1479 failure:
1480 	mes_v12_0_hw_fini(adev);
1481 	return r;
1482 }
1483 
1484 static int mes_v12_0_hw_fini(void *handle)
1485 {
1486 	return 0;
1487 }
1488 
1489 static int mes_v12_0_suspend(void *handle)
1490 {
1491 	int r;
1492 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1493 
1494 	r = amdgpu_mes_suspend(adev);
1495 	if (r)
1496 		return r;
1497 
1498 	return mes_v12_0_hw_fini(adev);
1499 }
1500 
1501 static int mes_v12_0_resume(void *handle)
1502 {
1503 	int r;
1504 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1505 
1506 	r = mes_v12_0_hw_init(adev);
1507 	if (r)
1508 		return r;
1509 
1510 	return amdgpu_mes_resume(adev);
1511 }
1512 
1513 static int mes_v12_0_early_init(void *handle)
1514 {
1515 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1516 	int pipe, r;
1517 
1518 	if (adev->enable_uni_mes) {
1519 		r = amdgpu_mes_init_microcode(adev, AMDGPU_MES_SCHED_PIPE);
1520 		if (!r)
1521 			return 0;
1522 
1523 		adev->enable_uni_mes = false;
1524 	}
1525 
1526 	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
1527 		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
1528 			continue;
1529 		r = amdgpu_mes_init_microcode(adev, pipe);
1530 		if (r)
1531 			return r;
1532 	}
1533 
1534 	return 0;
1535 }
1536 
1537 static int mes_v12_0_late_init(void *handle)
1538 {
1539 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1540 
1541 	/* it's only intended for the mes_self_test case, not for s0ix or reset */
1542 	if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend)
1543 		amdgpu_mes_self_test(adev);
1544 
1545 	return 0;
1546 }
1547 
1548 static const struct amd_ip_funcs mes_v12_0_ip_funcs = {
1549 	.name = "mes_v12_0",
1550 	.early_init = mes_v12_0_early_init,
1551 	.late_init = mes_v12_0_late_init,
1552 	.sw_init = mes_v12_0_sw_init,
1553 	.sw_fini = mes_v12_0_sw_fini,
1554 	.hw_init = mes_v12_0_hw_init,
1555 	.hw_fini = mes_v12_0_hw_fini,
1556 	.suspend = mes_v12_0_suspend,
1557 	.resume = mes_v12_0_resume,
1558 };
1559 
1560 const struct amdgpu_ip_block_version mes_v12_0_ip_block = {
1561 	.type = AMD_IP_BLOCK_TYPE_MES,
1562 	.major = 12,
1563 	.minor = 0,
1564 	.rev = 0,
1565 	.funcs = &mes_v12_0_ip_funcs,
1566 };
1567