/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
#include "gfx_v11_0.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "v11_structs.h"
#include "mes_v11_api_def.h"

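/*
 * Editorial note on the firmware naming below (inferred from common amdgpu
 * loader behavior rather than stated in this file): for each GC IP revision,
 * "_mes.bin" and "_mes_2.bin" carry the scheduler-pipe firmware (the "_2"
 * variant being the newer packaging), while "_mes1.bin" carries the KIQ-pipe
 * firmware.
 */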
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_mes1.bin");

static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);

#define MES_EOP_SIZE   2048
#define GFX_MES_DRAM_SIZE	0x80000
#define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE)

static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
			     ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG();
	}
}

static u64 mes_v11_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return *ring->rptr_cpu_addr;
}

static u64 mes_v11_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	u64 wptr;

	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	else
		BUG();
	return wptr;
}

static const struct amdgpu_ring_funcs mes_v11_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_MES,
	.align_mask = 1,
	.nop = 0,
	.support_64bit_ptrs = true,
	.get_rptr = mes_v11_0_ring_get_rptr,
	.get_wptr = mes_v11_0_ring_get_wptr,
	.set_wptr = mes_v11_0_ring_set_wptr,
	.insert_nop = amdgpu_ring_insert_nop,
};

static const char *mes_v11_0_opcodes[] = {
	"SET_HW_RSRC",
	"SET_SCHEDULING_CONFIG",
	"ADD_QUEUE",
	"REMOVE_QUEUE",
	"PERFORM_YIELD",
	"SET_GANG_PRIORITY_LEVEL",
	"SUSPEND",
	"RESUME",
	"RESET",
	"SET_LOG_BUFFER",
	"CHANGE_GANG_PRIORITY",
	"QUERY_SCHEDULER_STATUS",
	"PROGRAM_GDS",
	"SET_DEBUG_VMID",
	"MISC",
	"UPDATE_ROOT_PAGE_TABLE",
	"AMD_LOG",
	"unused",
	"unused",
	"SET_HW_RSRC_1",
};

static const char *mes_v11_0_misc_opcodes[] = {
	"WRITE_REG",
	"INV_GART",
	"QUERY_STATUS",
	"READ_REG",
	"WAIT_REG_MEM",
	"SET_SHADER_DEBUGGER",
};

static const char *mes_v11_0_get_op_string(union MESAPI__MISC *x_pkt)
{
	const char *op_str = NULL;

	if (x_pkt->header.opcode < ARRAY_SIZE(mes_v11_0_opcodes))
		op_str = mes_v11_0_opcodes[x_pkt->header.opcode];

	return op_str;
}

static const char *mes_v11_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
{
	const char *op_str = NULL;

	if ((x_pkt->header.opcode == MES_SCH_API_MISC) &&
	    (x_pkt->opcode < ARRAY_SIZE(mes_v11_0_misc_opcodes)))
		op_str = mes_v11_0_misc_opcodes[x_pkt->opcode];

	return op_str;
}

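/*
 * Editorial summary of the submission protocol implemented below: every MES
 * API packet embeds a MES_API_STATUS block, which is pointed at a writeback
 * slot that the firmware fills on completion. A QUERY_SCHEDULER_STATUS
 * packet is appended after the caller's packet with its completion fence
 * aimed at the ring's fence address, so a single poll on the sequence
 * number plus a check of the writeback slot confirms that both packets were
 * consumed.
 */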
static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
						    void *pkt, int size,
						    int api_status_off)
{
	union MESAPI__QUERY_MES_STATUS mes_status_pkt;
	signed long timeout = 2100000; /* 2100 ms */
	struct amdgpu_device *adev = mes->adev;
	struct amdgpu_ring *ring = &mes->ring[0];
	struct MES_API_STATUS *api_status;
	union MESAPI__MISC *x_pkt = pkt;
	const char *op_str, *misc_op_str;
	unsigned long flags;
	u64 status_gpu_addr;
	u32 seq, status_offset;
	u64 *status_ptr;
	signed long r;
	int ret;

	if (x_pkt->header.opcode >= MES_SCH_API_MAX)
		return -EINVAL;

	if (amdgpu_emu_mode) {
		timeout *= 100;
	} else if (amdgpu_sriov_vf(adev)) {
		/* Worst case in SR-IOV: all of the other 15 VFs time out, and each VF needs about 600 ms */
		timeout = 15 * 600 * 1000;
	}

	ret = amdgpu_device_wb_get(adev, &status_offset);
	if (ret)
		return ret;

	status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
	status_ptr = (u64 *)&adev->wb.wb[status_offset];
	*status_ptr = 0;

	spin_lock_irqsave(&mes->ring_lock[0], flags);
	r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
	if (r)
		goto error_unlock_free;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		goto error_undo;

	api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
	api_status->api_completion_fence_addr = status_gpu_addr;
	api_status->api_completion_fence_value = 1;

	amdgpu_ring_write_multiple(ring, pkt, size / 4);

	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
	mes_status_pkt.api_status.api_completion_fence_addr =
		ring->fence_drv.gpu_addr;
	mes_status_pkt.api_status.api_completion_fence_value = seq;

	amdgpu_ring_write_multiple(ring, &mes_status_pkt,
				   sizeof(mes_status_pkt) / 4);

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&mes->ring_lock[0], flags);

	op_str = mes_v11_0_get_op_string(x_pkt);
	misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);

	if (misc_op_str)
		dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
			misc_op_str);
	else if (op_str)
		dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
	else
		dev_dbg(adev->dev, "MES msg=%d was emitted\n",
			x_pkt->header.opcode);

	r = amdgpu_fence_wait_polling(ring, seq, timeout);
	if (r < 1 || !*status_ptr) {

		if (misc_op_str)
			dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
				op_str, misc_op_str);
		else if (op_str)
			dev_err(adev->dev, "MES failed to respond to msg=%s\n",
				op_str);
		else
			dev_err(adev->dev, "MES failed to respond to msg=%d\n",
				x_pkt->header.opcode);

		while (halt_if_hws_hang)
			schedule();

		r = -ETIMEDOUT;
		goto error_wb_free;
	}

	amdgpu_device_wb_free(adev, status_offset);
	return 0;

error_undo:
	dev_err(adev->dev, "MES ring buffer is full.\n");
	amdgpu_ring_undo(ring);

error_unlock_free:
	spin_unlock_irqrestore(&mes->ring_lock[0], flags);

error_wb_free:
	amdgpu_device_wb_free(adev, status_offset);
	return r;
}

static int convert_to_mes_queue_type(int queue_type)
{
	if (queue_type == AMDGPU_RING_TYPE_GFX)
		return MES_QUEUE_TYPE_GFX;
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		return MES_QUEUE_TYPE_COMPUTE;
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		return MES_QUEUE_TYPE_SDMA;
	else
		BUG();
	return -1;
}

static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
				  struct mes_add_queue_input *input)
{
	struct amdgpu_device *adev = mes->adev;
	union MESAPI__ADD_QUEUE mes_add_queue_pkt;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
	uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;

	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));

	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_add_queue_pkt.process_id = input->process_id;
	mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr;
	mes_add_queue_pkt.process_va_start = input->process_va_start;
	mes_add_queue_pkt.process_va_end = input->process_va_end;
	mes_add_queue_pkt.process_quantum = input->process_quantum;
	mes_add_queue_pkt.process_context_addr = input->process_context_addr;
	mes_add_queue_pkt.gang_quantum = input->gang_quantum;
	mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
	mes_add_queue_pkt.inprocess_gang_priority =
		input->inprocess_gang_priority;
	mes_add_queue_pkt.gang_global_priority_level =
		input->gang_global_priority_level;
	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_add_queue_pkt.mqd_addr = input->mqd_addr;

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 2)
		mes_add_queue_pkt.wptr_addr = input->wptr_mc_addr;
	else
		mes_add_queue_pkt.wptr_addr = input->wptr_addr;

	mes_add_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);
	mes_add_queue_pkt.paging = input->paging;
	mes_add_queue_pkt.vm_context_cntl = vm_cntx_cntl;
	mes_add_queue_pkt.gws_base = input->gws_base;
	mes_add_queue_pkt.gws_size = input->gws_size;
	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
	mes_add_queue_pkt.tma_addr = input->tma_addr;
	mes_add_queue_pkt.trap_en = input->trap_en;
	mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear;
	mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;

	/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
	mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
	mes_add_queue_pkt.gds_size = input->queue_size;

	mes_add_queue_pkt.exclusively_scheduled = input->exclusively_scheduled;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
			offsetof(union MESAPI__ADD_QUEUE, api_status));
}

static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
				     struct mes_remove_queue_input *input)
{
	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;

	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}

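/*
 * Direct MMIO fallback for resetting a single queue when the MES/KIQ path
 * is not usable. Editorial summary of the per-type mechanisms: GFX queues
 * are reset via CP_VMID_RESET broadcast to all SEs, compute queues via a
 * HQD dequeue request plus SPI_COMPUTE_QUEUE_RESET, and SDMA queues via the
 * per-instance SDMAx_QUEUE_RESET_REQ register.
 */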
static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
				      uint32_t me_id, uint32_t pipe_id,
				      uint32_t queue_id, uint32_t vmid)
{
	struct amdgpu_device *adev = mes->adev;
	uint32_t value, reg;
	int i, r = 0;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	if (queue_type == AMDGPU_RING_TYPE_GFX) {
		dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
			 me_id, pipe_id, queue_id, vmid);

		mutex_lock(&adev->gfx.reset_sem_mutex);
		gfx_v11_0_request_gfx_index_mutex(adev, true);
		/* allow writes to all SEs */
		WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
			     (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
		value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
		if (pipe_id == 0)
			value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
		else
			value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
		WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
		gfx_v11_0_request_gfx_index_mutex(adev, false);
		mutex_unlock(&adev->gfx.reset_sem_mutex);

		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
		/* wait until the dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout) {
			dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
			r = -ETIMEDOUT;
		}

		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
			 me_id, pipe_id, queue_id);
		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);

		/* wait until the dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout) {
			dev_err(adev->dev, "failed to wait on hqd deactivate\n");
			r = -ETIMEDOUT;
		}
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else if (queue_type == AMDGPU_RING_TYPE_SDMA) {
		dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n",
			 me_id, pipe_id, queue_id);
		switch (me_id) {
		case 1:
			reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ);
			break;
		case 0:
		default:
			reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ);
			break;
		}

		value = 1 << queue_id;
		WREG32(reg, value);
		/* wait for the queue reset to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32(reg) & value))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout) {
			dev_err(adev->dev, "failed to wait on sdma queue reset done\n");
			r = -ETIMEDOUT;
		}
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	return r;
}

static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
				    struct mes_reset_queue_input *input)
{
	union MESAPI__RESET mes_reset_queue_pkt;

	if (input->use_mmio)
		return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
						  input->me_id, input->pipe_id,
						  input->queue_id, input->vmid);

	memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));

	mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
	mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
	/*mes_reset_queue_pkt.reset_queue_only = 1;*/

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
			offsetof(union MESAPI__RESET, api_status));
}

static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
				      struct mes_map_legacy_queue_input *input)
{
	union MESAPI__ADD_QUEUE mes_add_queue_pkt;

	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));

	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_add_queue_pkt.pipe_id = input->pipe_id;
	mes_add_queue_pkt.queue_id = input->queue_id;
	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_add_queue_pkt.mqd_addr = input->mqd_addr;
	mes_add_queue_pkt.wptr_addr = input->wptr_addr;
	mes_add_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);
	mes_add_queue_pkt.map_legacy_kq = 1;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
			offsetof(union MESAPI__ADD_QUEUE, api_status));
}

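/*
 * Editorial note: unmapping a legacy (kernel) queue has two flavors.
 * PREEMPT_QUEUES_NO_UNMAP only preempts the gfx queue and has the firmware
 * write a trailing-fence value once preemption completes, while the default
 * path fully unmaps the queue via REMOVE_QUEUE.
 */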
static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
			struct mes_unmap_legacy_queue_input *input)
{
	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;

	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_remove_queue_pkt.gang_context_addr = 0;

	mes_remove_queue_pkt.pipe_id = input->pipe_id;
	mes_remove_queue_pkt.queue_id = input->queue_id;

	if (input->action == PREEMPT_QUEUES_NO_UNMAP) {
		mes_remove_queue_pkt.preempt_legacy_gfx_queue = 1;
		mes_remove_queue_pkt.tf_addr = input->trail_fence_addr;
		mes_remove_queue_pkt.tf_data =
			lower_32_bits(input->trail_fence_data);
	} else {
		mes_remove_queue_pkt.unmap_legacy_queue = 1;
		mes_remove_queue_pkt.queue_type =
			convert_to_mes_queue_type(input->queue_type);
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}

static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes,
				  struct mes_suspend_gang_input *input)
{
	union MESAPI__SUSPEND mes_suspend_gang_pkt;

	memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));

	mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
	mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
	mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
	mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
	mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
			offsetof(union MESAPI__SUSPEND, api_status));
}

static int mes_v11_0_resume_gang(struct amdgpu_mes *mes,
				 struct mes_resume_gang_input *input)
{
	union MESAPI__RESUME mes_resume_gang_pkt;

	memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));

	mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
	mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
	mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
			offsetof(union MESAPI__RESUME, api_status));
}

static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes)
{
	union MESAPI__QUERY_MES_STATUS mes_status_pkt;

	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));

	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_status_pkt, sizeof(mes_status_pkt),
			offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}

static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
			     struct mes_misc_op_input *input)
{
	union MESAPI__MISC misc_pkt;

	memset(&misc_pkt, 0, sizeof(misc_pkt));

	misc_pkt.header.type = MES_API_TYPE_SCHEDULER;
	misc_pkt.header.opcode = MES_SCH_API_MISC;
	misc_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	switch (input->op) {
	case MES_MISC_OP_READ_REG:
		misc_pkt.opcode = MESAPI_MISC__READ_REG;
		misc_pkt.read_reg.reg_offset = input->read_reg.reg_offset;
		misc_pkt.read_reg.buffer_addr = input->read_reg.buffer_addr;
		break;
	case MES_MISC_OP_WRITE_REG:
		misc_pkt.opcode = MESAPI_MISC__WRITE_REG;
		misc_pkt.write_reg.reg_offset = input->write_reg.reg_offset;
		misc_pkt.write_reg.reg_value = input->write_reg.reg_value;
		break;
	case MES_MISC_OP_WRM_REG_WAIT:
		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
		misc_pkt.wait_reg_mem.reg_offset2 = 0;
		break;
	case MES_MISC_OP_WRM_REG_WR_WAIT:
		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WR_WAIT_WR_REG;
		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
		misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1;
		break;
	case MES_MISC_OP_SET_SHADER_DEBUGGER:
		misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
		misc_pkt.set_shader_debugger.process_context_addr =
				input->set_shader_debugger.process_context_addr;
		misc_pkt.set_shader_debugger.flags.u32all =
				input->set_shader_debugger.flags.u32all;
		misc_pkt.set_shader_debugger.spi_gdbg_per_vmid_cntl =
				input->set_shader_debugger.spi_gdbg_per_vmid_cntl;
		memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl,
				input->set_shader_debugger.tcp_watch_cntl,
				sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
		misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
		break;
	case MES_MISC_OP_CHANGE_CONFIG:
		if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
			dev_err(mes->adev->dev, "MES FW version must be 0x63 or newer to support the limit-single-process feature.\n");
			return -EINVAL;
		}
		misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
		misc_pkt.change_config.opcode =
				MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS;
		misc_pkt.change_config.option.bits.limit_single_process =
				input->change_config.option.limit_single_process;
		break;

	default:
		DRM_ERROR("unsupported misc op (%d)\n", input->op);
		return -EINVAL;
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&misc_pkt, sizeof(misc_pkt),
			offsetof(union MESAPI__MISC, api_status));
}

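/*
 * Editorial summary: SET_HW_RSRC hands the scheduler its global view of the
 * hardware: the VMID masks per hub, the HQD masks describing which
 * compute/gfx/SDMA pipes MES may use, the aggregated doorbells per priority
 * level, and the gc/mmhub/osssys register aperture bases it needs to issue
 * register accesses on the driver's behalf.
 */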
static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
{
	int i;
	struct amdgpu_device *adev = mes->adev;
	union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;

	memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));

	mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
	mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
	mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
	mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
	mes_set_hw_res_pkt.paging_vmid = 0;
	mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr[0];
	mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
		mes->query_status_fence_gpu_addr[0];

	for (i = 0; i < MAX_COMPUTE_PIPES; i++)
		mes_set_hw_res_pkt.compute_hqd_mask[i] =
			mes->compute_hqd_mask[i];

	for (i = 0; i < MAX_GFX_PIPES; i++)
		mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];

	for (i = 0; i < MAX_SDMA_PIPES; i++)
		mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];

	for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
		mes_set_hw_res_pkt.aggregated_doorbells[i] =
			mes->aggregated_doorbells[i];

	for (i = 0; i < 5; i++) {
		mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
		mes_set_hw_res_pkt.mmhub_base[i] =
				adev->reg_offset[MMHUB_HWIP][0][i];
		mes_set_hw_res_pkt.osssys_base[i] =
				adev->reg_offset[OSSSYS_HWIP][0][i];
	}

	mes_set_hw_res_pkt.disable_reset = 1;
	mes_set_hw_res_pkt.disable_mes_log = 1;
	mes_set_hw_res_pkt.use_different_vmid_compute = 1;
	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
	mes_set_hw_res_pkt.oversubscription_timer = 50;
	if (amdgpu_mes_log_enable) {
		mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
					mes->event_log_gpu_addr;
	}

	if (enforce_isolation)
		mes_set_hw_res_pkt.limit_single_process = 1;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
			offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}

static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
{
	union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt;
	memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));

	mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
	mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
	mes_set_hw_res_pkt.enable_mes_info_ctx = 1;

	mes_set_hw_res_pkt.cleaner_shader_fence_mc_addr = mes->resource_1_gpu_addr[0];
	if (amdgpu_sriov_is_mes_info_enable(mes->adev)) {
		mes_set_hw_res_pkt.mes_info_ctx_mc_addr =
			mes->resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE;
		mes_set_hw_res_pkt.mes_info_ctx_size = MES11_HW_RESOURCE_1_SIZE;
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
			offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}

static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
					struct mes_reset_legacy_queue_input *input)
{
	union MESAPI__RESET mes_reset_queue_pkt;

	if (input->use_mmio)
		return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
						  input->me_id, input->pipe_id,
						  input->queue_id, input->vmid);

	memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));

	mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
	mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_reset_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);

	if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
		mes_reset_queue_pkt.reset_legacy_gfx = 1;
		mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
		mes_reset_queue_pkt.queue_id_lp = input->queue_id;
		mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr;
		mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset;
		mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr;
		mes_reset_queue_pkt.vmid_id_lp = input->vmid;
	} else {
		mes_reset_queue_pkt.reset_queue_only = 1;
		mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
			offsetof(union MESAPI__RESET, api_status));
}

static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
	.add_hw_queue = mes_v11_0_add_hw_queue,
	.remove_hw_queue = mes_v11_0_remove_hw_queue,
	.map_legacy_queue = mes_v11_0_map_legacy_queue,
	.unmap_legacy_queue = mes_v11_0_unmap_legacy_queue,
	.suspend_gang = mes_v11_0_suspend_gang,
	.resume_gang = mes_v11_0_resume_gang,
	.misc_op = mes_v11_0_misc_op,
	.reset_legacy_queue = mes_v11_0_reset_legacy_queue,
	.reset_hw_queue = mes_v11_0_reset_hw_queue,
};

static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
					   enum amdgpu_mes_pipe pipe)
{
	int r;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	const __le32 *fw_data;
	unsigned fw_size;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;

	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
		   le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
	fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->mes.ucode_fw_obj[pipe],
				      &adev->mes.ucode_fw_gpu_addr[pipe],
				      (void **)&adev->mes.ucode_fw_ptr[pipe]);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
		return r;
	}

	memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size);

	amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]);

	return 0;
}

static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
						enum amdgpu_mes_pipe pipe)
{
	int r;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	const __le32 *fw_data;
	unsigned fw_size;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;

	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
		   le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
	fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

	if (fw_size > GFX_MES_DRAM_SIZE) {
		dev_err(adev->dev, "PIPE%d ucode data fw size (%d) is greater than dram size (%d)\n",
			pipe, fw_size, GFX_MES_DRAM_SIZE);
		return -EINVAL;
	}

	r = amdgpu_bo_create_reserved(adev, GFX_MES_DRAM_SIZE,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->mes.data_fw_obj[pipe],
				      &adev->mes.data_fw_gpu_addr[pipe],
				      (void **)&adev->mes.data_fw_ptr[pipe]);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
		return r;
	}

	memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size);

	amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]);

	return 0;
}

static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
					 enum amdgpu_mes_pipe pipe)
{
	amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
			      &adev->mes.data_fw_gpu_addr[pipe],
			      (void **)&adev->mes.data_fw_ptr[pipe]);

	amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe],
			      &adev->mes.ucode_fw_gpu_addr[pipe],
			      (void **)&adev->mes.ucode_fw_ptr[pipe]);
}

static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
{
	int pipe;

	/* get MES scheduler/KIQ versions */
	mutex_lock(&adev->srbm_mutex);

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		soc21_grbm_select(adev, 3, pipe, 0, 0);

		if (pipe == AMDGPU_MES_SCHED_PIPE)
			adev->mes.sched_version =
				RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
		else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
			adev->mes.kiq_version =
				RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
	}

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

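/*
 * Editorial summary of the bring-up sequence: both MES pipes are held in
 * reset while CP_MES_PRGRM_CNTR_START/_HI are programmed with each pipe's
 * ucode start address, then a single CP_MES_CNTL write clears the resets
 * and sets the ACTIVE bits to unhalt the firmware.
 */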
static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
{
	uint64_t ucode_addr;
	uint32_t pipe, data = 0;

	if (enable) {
		if (amdgpu_mes_log_enable) {
			WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO,
				lower_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE));
			WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI,
				upper_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE));
			dev_info(adev->dev, "Set up CP MES MSCRATCH address: hi 0x%x, lo 0x%x\n",
				RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI),
				RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO));
		}

		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL,
			     MES_PIPE1_RESET, adev->enable_mes_kiq ? 1 : 0);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);

		mutex_lock(&adev->srbm_mutex);
		for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
			if (!adev->enable_mes_kiq &&
			    pipe == AMDGPU_MES_KIQ_PIPE)
				continue;

			soc21_grbm_select(adev, 3, pipe, 0, 0);

			ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
				     lower_32_bits(ucode_addr));
			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
				     upper_32_bits(ucode_addr));
		}
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		/* unhalt MES and activate pipe0 */
		data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
				     adev->enable_mes_kiq ? 1 : 0);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);

		if (amdgpu_emu_mode)
			msleep(100);
		else
			udelay(500);
	} else {
		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL,
				     MES_INVALIDATE_ICACHE, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
				     adev->enable_mes_kiq ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
	}
}

/* This function is used for backdoor loading of the MES firmware */
static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
				    enum amdgpu_mes_pipe pipe, bool prime_icache)
{
	int r;
	uint32_t data;
	uint64_t ucode_addr;

	mes_v11_0_enable(adev, false);

	if (!adev->mes.fw[pipe])
		return -EINVAL;

	r = mes_v11_0_allocate_ucode_buffer(adev, pipe);
	if (r)
		return r;

	r = mes_v11_0_allocate_ucode_data_buffer(adev, pipe);
	if (r) {
		mes_v11_0_free_ucode_buffers(adev, pipe);
		return r;
	}

	mutex_lock(&adev->srbm_mutex);
	/* me=3, queue=0; the pipe argument selects the MES pipe */
	soc21_grbm_select(adev, 3, pipe, 0, 0);

	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_CNTL, 0);

	/* set ucode start address */
	ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
	WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
		     lower_32_bits(ucode_addr));
	WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
		     upper_32_bits(ucode_addr));

	/* set ucode firmware address */
	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_LO,
		     lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_HI,
		     upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));

	/* set ucode instruction cache boundary to 2M-1 */
	WREG32_SOC15(GC, 0, regCP_MES_MIBOUND_LO, 0x1FFFFF);

	/* set ucode data firmware address */
	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_LO,
		     lower_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI,
		     upper_32_bits(adev->mes.data_fw_gpu_addr[pipe]));

	/* set CP_MES_MDBOUND_LO to 0x7FFFF (512K-1), the ucode data boundary */
	WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x7FFFF);

	if (prime_icache) {
		/* invalidate ICACHE */
		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);

		/* prime the ICACHE. */
		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
	}

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	return 0;
}

static int mes_v11_0_allocate_eop_buf(struct amdgpu_device *adev,
				      enum amdgpu_mes_pipe pipe)
{
	int r;
	u32 *eop;

	r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
			      AMDGPU_GEM_DOMAIN_GTT,
			      &adev->mes.eop_gpu_obj[pipe],
			      &adev->mes.eop_gpu_addr[pipe],
			      (void **)&eop);
	if (r) {
		dev_warn(adev->dev, "(%d) create EOP bo failed\n", r);
		return r;
	}

	memset(eop, 0,
	       adev->mes.eop_gpu_obj[pipe]->tbo.base.size);

	amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]);

	return 0;
}

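/*
 * The MQD (memory queue descriptor) is a memory image of the HQD registers
 * for this queue. It is either consumed by the CP when the queue is mapped
 * through KIQ, or replayed into the registers by
 * mes_v11_0_queue_init_register() for the KIQ pipe itself.
 */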
static int mes_v11_0_mqd_init(struct amdgpu_ring *ring)
{
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	memset(mqd, 0, sizeof(*mqd));

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000007;

	eop_base_addr = ring->eop_gpu_addr >> 8;

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			(order_base_2(MES_EOP_SIZE / 4) - 1));

	mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr);
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
	mqd->cp_hqd_eop_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = regCP_MQD_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr);
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = ring->rptr_gpu_addr;
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = ring->wptr_gpu_addr;
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* enable doorbell */
	tmp = 0;
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	mqd->cp_hqd_pq_doorbell_control = tmp;

	mqd->cp_hqd_vmid = 0;
	/* activate the queue */
	mqd->cp_hqd_active = 1;

	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE,
			    PRELOAD_SIZE, 0x55);
	mqd->cp_hqd_persistent_state = tmp;

	mqd->cp_hqd_ib_control = regCP_HQD_IB_CONTROL_DEFAULT;
	mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT;
	mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT;

	amdgpu_device_flush_hdp(ring->adev, NULL);
	return 0;
}

static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring)
{
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data = 0;

	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, ring->pipe, 0, 0);

	/* set CP_HQD_VMID.VMID = 0. */
	data = RREG32_SOC15(GC, 0, regCP_HQD_VMID);
	data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_VMID, data);

	/* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */
	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_EN, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* set CP_MQD_BASE_ADDR/HI with the MQD base address */
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);

	/* set CP_MQD_CONTROL.VMID=0 */
	data = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
	data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0);
	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, data);

	/* set CP_HQD_PQ_BASE/HI with the ring buffer base address */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);

	/* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* set CP_HQD_PQ_CONTROL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);

	/* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* set CP_HQD_PQ_DOORBELL_CONTROL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* set CP_HQD_PERSISTENT_STATE.PRELOAD_SIZE=0x55 */
	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);

	/* set CP_HQD_ACTIVE.ACTIVE=1 */
	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, mqd->cp_hqd_active);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
	int r;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);

	return amdgpu_ring_test_helper(kiq_ring);
}

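/*
 * Editorial note: the scheduler-pipe ring is mapped by submitting a KIQ
 * map-queues packet, whereas the KIQ ring itself cannot be mapped that way
 * and is instead programmed directly through the HQD registers.
 */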
static int mes_v11_0_queue_init(struct amdgpu_device *adev,
				enum amdgpu_mes_pipe pipe)
{
	struct amdgpu_ring *ring;
	int r;

	if (pipe == AMDGPU_MES_KIQ_PIPE)
		ring = &adev->gfx.kiq[0].ring;
	else if (pipe == AMDGPU_MES_SCHED_PIPE)
		ring = &adev->mes.ring[0];
	else
		BUG();

	if ((pipe == AMDGPU_MES_SCHED_PIPE) &&
	    (amdgpu_in_reset(adev) || adev->in_suspend)) {
		*(ring->wptr_cpu_addr) = 0;
		*(ring->rptr_cpu_addr) = 0;
		amdgpu_ring_clear_ring(ring);
	}

	r = mes_v11_0_mqd_init(ring);
	if (r)
		return r;

	if (pipe == AMDGPU_MES_SCHED_PIPE) {
		r = mes_v11_0_kiq_enable_queue(adev);
		if (r)
			return r;
	} else {
		mes_v11_0_queue_init_register(ring);
	}

	return 0;
}

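/*
 * Editorial note on the doorbell indices below: the values in
 * adev->doorbell_index are kept in 64-bit doorbell units, while
 * ring->doorbell_index is expressed in 32-bit dword slots, hence the << 1
 * when assigning it.
 */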
static int mes_v11_0_ring_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;

	ring = &adev->mes.ring[0];

	ring->funcs = &mes_v11_0_ring_funcs;

	ring->me = 3;
	ring->pipe = 0;
	ring->queue = 0;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
	ring->no_scheduler = true;
	sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
				AMDGPU_RING_PRIO_DEFAULT, NULL);
}

static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;

	spin_lock_init(&adev->gfx.kiq[0].ring_lock);

	ring = &adev->gfx.kiq[0].ring;

	ring->me = 3;
	ring->pipe = 1;
	ring->queue = 0;

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;
	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE];
	ring->no_scheduler = true;
	sprintf(ring->name, "mes_kiq_%d.%d.%d",
		ring->me, ring->pipe, ring->queue);

	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
				AMDGPU_RING_PRIO_DEFAULT, NULL);
}

static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
				 enum amdgpu_mes_pipe pipe)
{
	int r, mqd_size = sizeof(struct v11_compute_mqd);
	struct amdgpu_ring *ring;

	if (pipe == AMDGPU_MES_KIQ_PIPE)
		ring = &adev->gfx.kiq[0].ring;
	else if (pipe == AMDGPU_MES_SCHED_PIPE)
		ring = &adev->mes.ring[0];
	else
		BUG();

	if (ring->mqd_obj)
		return 0;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
				    &ring->mqd_gpu_addr, &ring->mqd_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
		return r;
	}

	memset(ring->mqd_ptr, 0, mqd_size);

	/* prepare MQD backup */
	adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
	if (!adev->mes.mqd_backup[pipe]) {
		dev_warn(adev->dev,
			 "no memory to create MQD backup for ring %s\n",
			 ring->name);
		return -ENOMEM;
	}

	return 0;
}

static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int pipe, r, bo_size;

	adev->mes.funcs = &mes_v11_0_funcs;
	adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
	adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;

	adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;

	r = amdgpu_mes_init(adev);
	if (r)
		return r;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
			continue;

		r = mes_v11_0_allocate_eop_buf(adev, pipe);
		if (r)
			return r;

		r = mes_v11_0_mqd_sw_init(adev, pipe);
		if (r)
			return r;
	}

	if (adev->enable_mes_kiq) {
		r = mes_v11_0_kiq_ring_init(adev);
		if (r)
			return r;
	}

	r = mes_v11_0_ring_init(adev);
	if (r)
		return r;

	bo_size = AMDGPU_GPU_PAGE_SIZE;
	if (amdgpu_sriov_is_mes_info_enable(adev))
		bo_size += MES11_HW_RESOURCE_1_SIZE;

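	/*
	 * Editorial note on the layout of the resource_1 BO, inferred from
	 * mes_v11_0_set_hw_resources_1(): the first page holds the
	 * cleaner-shader fence; under SR-IOV with MES info enabled, the
	 * following MES11_HW_RESOURCE_1_SIZE bytes hold the MES info context.
	 */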
	/* Only needed for AMDGPU_MES_SCHED_PIPE on MES 11 */
	r = amdgpu_bo_create_kernel(adev,
				    bo_size,
				    PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->mes.resource_1[0],
				    &adev->mes.resource_1_gpu_addr[0],
				    &adev->mes.resource_1_addr[0]);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", r);
		return r;
	}

	return 0;
}

static int mes_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int pipe;

	amdgpu_bo_free_kernel(&adev->mes.resource_1[0], &adev->mes.resource_1_gpu_addr[0],
			      &adev->mes.resource_1_addr[0]);

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		kfree(adev->mes.mqd_backup[pipe]);

		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
				      &adev->mes.eop_gpu_addr[pipe],
				      NULL);
		amdgpu_ucode_release(&adev->mes.fw[pipe]);
	}

	amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
			      &adev->gfx.kiq[0].ring.mqd_gpu_addr,
			      &adev->gfx.kiq[0].ring.mqd_ptr);

	amdgpu_bo_free_kernel(&adev->mes.ring[0].mqd_obj,
			      &adev->mes.ring[0].mqd_gpu_addr,
			      &adev->mes.ring[0].mqd_ptr);

	amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
	amdgpu_ring_fini(&adev->mes.ring[0]);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
		mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_SCHED_PIPE);
	}

	amdgpu_mes_fini(adev);
	return 0;
}

static void mes_v11_0_kiq_dequeue(struct amdgpu_ring *ring)
{
	uint32_t data;
	int i;
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, ring->pipe, 0, 0);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
	}
	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
				DOORBELL_EN, 0);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
				DOORBELL_HIT, 1);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

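/*
 * Editorial note: RLC_CP_SCHEDULERS.scheduler0 identifies the KIQ to the
 * RLC. The low byte encodes me/pipe/queue, and bit 7 appears to act as the
 * enable/valid bit, which is why 0x80 is OR'ed in here and the whole field
 * is cleared again in mes_v11_0_kiq_clear().
 */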
static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}

static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* tell RLC which is KIQ dequeue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= ~RLC_CP_SCHEDULERS__scheduler0_MASK;
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}

static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
{
	int r = 0;
	struct amdgpu_ip_block *ip_block;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {

		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
		if (r) {
			DRM_ERROR("failed to load MES fw, r=%d\n", r);
			return r;
		}

		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
		if (r) {
			DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
			return r;
		}

	}

	mes_v11_0_enable(adev, true);

	mes_v11_0_get_fw_version(adev);

	mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
	if (unlikely(!ip_block)) {
		dev_err(adev->dev, "Failed to get MES handle\n");
		return -EINVAL;
	}

	r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
	if (r)
		goto failure;

	if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x47)
		adev->mes.enable_legacy_queue_map = true;
	else
		adev->mes.enable_legacy_queue_map = false;

	if (adev->mes.enable_legacy_queue_map) {
		r = mes_v11_0_hw_init(ip_block);
		if (r)
			goto failure;
	}

	return r;

failure:
	mes_v11_0_hw_fini(ip_block);
	return r;
}

static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
	if (adev->mes.ring[0].sched.ready) {
		mes_v11_0_kiq_dequeue(&adev->mes.ring[0]);
		adev->mes.ring[0].sched.ready = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring);
		mes_v11_0_kiq_clear(adev);
	}

	mes_v11_0_enable(adev, false);

	return 0;
}

static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->mes.ring[0].sched.ready)
		goto out;

	if (!adev->enable_mes_kiq) {
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			r = mes_v11_0_load_microcode(adev,
					     AMDGPU_MES_SCHED_PIPE, true);
			if (r) {
				DRM_ERROR("failed to load MES fw, r=%d\n", r);
				return r;
			}
		}

		mes_v11_0_enable(adev, true);
	}

	r = mes_v11_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
	if (r)
		goto failure;

	r = mes_v11_0_set_hw_resources(&adev->mes);
	if (r)
		goto failure;

	r = mes_v11_0_set_hw_resources_1(&adev->mes);
	if (r) {
		DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
		goto failure;
	}

	r = mes_v11_0_query_sched_status(&adev->mes);
	if (r) {
		DRM_ERROR("MES is busy\n");
		goto failure;
	}

	r = amdgpu_mes_update_enforce_isolation(adev);
	if (r)
		goto failure;

out:
	/*
	 * Disable KIQ ring usage from the driver once MES is enabled.
	 * MES uses KIQ ring exclusively so driver cannot access KIQ ring
	 * with MES enabled.
	 */
	adev->gfx.kiq[0].ring.sched.ready = false;
	adev->mes.ring[0].sched.ready = true;

	return 0;

failure:
	mes_v11_0_hw_fini(ip_block);
	return r;
}

static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	return 0;
}

static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
	return mes_v11_0_hw_fini(ip_block);
}

static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
	return mes_v11_0_hw_init(ip_block);
}

static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int pipe, r;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
			continue;
		r = amdgpu_mes_init_microcode(adev, pipe);
		if (r)
			return r;
	}

	return 0;
}

static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* only intended for the mes_self_test case; skip for s0ix and reset */
	if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)))
		amdgpu_mes_self_test(adev);

	return 0;
}

static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
	.name = "mes_v11_0",
	.early_init = mes_v11_0_early_init,
	.late_init = mes_v11_0_late_init,
	.sw_init = mes_v11_0_sw_init,
	.sw_fini = mes_v11_0_sw_fini,
	.hw_init = mes_v11_0_hw_init,
	.hw_fini = mes_v11_0_hw_fini,
	.suspend = mes_v11_0_suspend,
	.resume = mes_v11_0_resume,
};

const struct amdgpu_ip_block_version mes_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_MES,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &mes_v11_0_ip_funcs,
};