/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v11_structs.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "amdgpu_amdkfd.h"

static inline struct v11_compute_mqd *get_mqd(void *mqd)
{
	return (struct v11_compute_mqd *)mqd;
}

static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v11_sdma_mqd *)mqd;
}

static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			   struct mqd_update_info *minfo)
{
	struct v11_compute_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
	bool has_wa_flag = minfo && (minfo->update_flag & (UPDATE_FLAG_DBG_WA_ENABLE |
			UPDATE_FLAG_DBG_WA_DISABLE));

	if (!minfo || !(has_wa_flag || minfo->cu_mask.ptr))
		return;

	m = get_mqd(mqd);

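	/*
	 * Debugger workaround: while it is enabled, restrict each SE's
	 * static CU mask to the lower 16 CUs (0xffff); once disabled,
	 * restore the full 0xffffffff mask.
	 */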
	if (has_wa_flag) {
		uint32_t wa_mask =
			(minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff;

		m->compute_static_thread_mgmt_se0 = wa_mask;
		m->compute_static_thread_mgmt_se1 = wa_mask;
		m->compute_static_thread_mgmt_se2 = wa_mask;
		m->compute_static_thread_mgmt_se3 = wa_mask;
		m->compute_static_thread_mgmt_se4 = wa_mask;
		m->compute_static_thread_mgmt_se5 = wa_mask;
		m->compute_static_thread_mgmt_se6 = wa_mask;
		m->compute_static_thread_mgmt_se7 = wa_mask;

		return;
	}

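	/*
	 * mqd_symmetrically_map_cu_mask() distributes the user-supplied
	 * CU bitmap evenly across the shader engines, producing one
	 * 32-bit enable mask per SE in se_mask[].
	 */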
	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);

	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	m->compute_static_thread_mgmt_se4 = se_mask[4];
	m->compute_static_thread_mgmt_se5 = se_mask[5];
	m->compute_static_thread_mgmt_se6 = se_mask[6];
	m->compute_static_thread_mgmt_se7 = se_mask[7];

	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3,
		m->compute_static_thread_mgmt_se4,
		m->compute_static_thread_mgmt_se5,
		m->compute_static_thread_mgmt_se6,
		m->compute_static_thread_mgmt_se7);
}

static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}

static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
		struct queue_properties *q)
{
	u32 mqd_size = AMDGPU_MQD_SIZE_ALIGN(mm->mqd_size);
	struct kfd_node *node = mm->dev;
	struct kfd_mem_obj *mqd_mem_obj;

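	/*
	 * Assumption: AMDGPU_MQD_SIZE_ALIGN pads mqd_size so firmware
	 * accesses past the strict struct size (e.g. by MES) stay inside
	 * the GTT sub-allocation.
	 */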
	if (kfd_gtt_sa_allocate(node, mqd_size, &mqd_mem_obj))
		return NULL;

	return mqd_mem_obj;
}

static void init_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	uint64_t addr;
	struct v11_compute_mqd *m;
	u32 mqd_size = AMDGPU_MQD_SIZE_ALIGN(mm->mqd_size);
	uint32_t wa_mask = q->is_dbg_wa ? 0xffff : 0xffffffff;

	m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memset(m, 0, mqd_size);

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;

	m->compute_static_thread_mgmt_se0 = wa_mask;
	m->compute_static_thread_mgmt_se1 = wa_mask;
	m->compute_static_thread_mgmt_se2 = wa_mask;
	m->compute_static_thread_mgmt_se3 = wa_mask;
	m->compute_static_thread_mgmt_se4 = wa_mask;
	m->compute_static_thread_mgmt_se5 = wa_mask;
	m->compute_static_thread_mgmt_se6 = wa_mask;
	m->compute_static_thread_mgmt_se7 = wa_mask;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	/* Set cp_hqd_hq_status0 bit 14 to 1 to have the CP set up the
	 * DISPATCH_PTR.  This is required for the kfd debugger
	 */
	m->cp_hqd_hq_status0 = 1 << 14;

	/*
	 * GFX11 RS64 CPFW version >= 509 supports acknowledgment of PCIe
	 * atomics support.
	 */
	if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
		m->cp_hqd_hq_status0 |= 1 << 29;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
	}

	if (mm->dev->kfd->cwsr_enabled) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
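		/*
		 * A new queue starts with an empty control stack:
		 * offset == size means nothing is used yet, and the
		 * workgroup state area begins immediately after the
		 * control stack region (see get_wave_state()).
		 */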
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	mm->update_mqd(mm, m, q, NULL);
}

static int load_mqd(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	int r = 0;
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
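	/* A shift of 4 scales the AQL wptr by 16: one 64-byte packet is
	 * 16 dwords. PM4 wptrs already count dwords, so no shift is used.
	 */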
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, 0, mms, 0);
	return r;
}

static void update_mqd(struct mqd_manager *mm, void *mqd,
		       struct queue_properties *q,
		       struct mqd_update_info *minfo)
{
	struct v11_compute_mqd *m;

	m = get_mqd(mqd);

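	/*
	 * QUEUE_SIZE holds log2(queue size in dwords) - 1, hence
	 * ffs(bytes / 4) - 1 - 1 below; e.g. a 4 KiB ring is 1024 dwords
	 * and encodes as 9.
	 */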
	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
	m->cp_hqd_pq_control |=
			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1) so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 */
	m->cp_hqd_eop_control = min(0xA,
		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
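	/*
	 * Worked example of the clamp above: the safe 0x800-dword queue
	 * is 0x2000 bytes and ffs(0x2000 / 4) - 1 - 1 = 0xA.
	 */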
	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		/* GC 10 removed WPP_CLAMP from PQ Control */
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |=
			1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->kfd->cwsr_enabled)
		m->cp_hqd_ctx_save_control = 0;

	update_cu_mask(mm, mqd, minfo);
	set_priority(m, q);

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
	struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd;

	return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}

static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  struct queue_properties *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v11_compute_mqd *m;
	struct kfd_context_save_area_header header;

	m = get_mqd(mqd);

	/* Control stack is written backwards, while workgroup context data
	 * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
	 * Current position is at m->cp_hqd_cntl_stack_offset and
	 * m->cp_hqd_wg_state_offset, respectively.
	 */
	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;
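	/*
	 * Resulting layout of the context save area:
	 *
	 *   [0, cntl_stack_offset)                unused control stack
	 *   [cntl_stack_offset, cntl_stack_size)  used control stack
	 *   [cntl_stack_size, wg_state_offset)    used workgroup state
	 */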

	/* Control stack is not copied to user mode for GFXv11 because
	 * it's part of the context save area that is already
	 * accessible to user mode
	 */
	header.wave_state.control_stack_size = *ctl_stack_used_size;
	header.wave_state.wave_state_size = *save_area_used_size;

	header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
	header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;

	if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
		return -EFAULT;

	return 0;
}

static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
	struct v11_compute_mqd *m;

	m = get_mqd(mqd);

	memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
}

static void restore_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, const u32 ctl_stack_size)
{
	uint64_t addr;
	struct v11_compute_mqd *m;

	m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

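	/*
	 * The doorbell offset is process-specific and not part of the
	 * checkpointed MQD image; recompute it from the restored queue's
	 * properties.
	 */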
	m->cp_hqd_pq_doorbell_control =
		qp->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	qp->is_active = 0;
}


static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v11_compute_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	int err;
	struct v11_compute_mqd *m;
	u32 doorbell_off;

	m = get_mqd(mqd);

	doorbell_off = m->cp_hqd_pq_doorbell_control >>
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;

	err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
	if (err)
		pr_debug("Destroy HIQ MQD failed: %d\n", err);

	return err;
}

static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v11_sdma_mqd *m;
	int size;

	m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	if (mm->dev->kfd->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_sdma_mqd);

	memset(m, 0, size);
	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
		struct queue_properties *q,
		struct mqd_update_info *minfo)
{
	struct v11_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
		<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
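	/*
	 * RB_SIZE holds log2(ring size in dwords), i.e. ffs(bytes / 4) - 1;
	 * e.g. a 4 KiB ring is 1024 dwords and encodes as 10. Note the CP
	 * queue-size encoding in update_mqd() subtracts one more.
	 */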

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
		<< SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
		& SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;
	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v11_compute_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v11_sdma_mqd), false);
	return 0;
}

#endif

struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
		struct kfd_node *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
		mqd->get_wave_state = get_wave_state;
		mqd->mqd_stride = kfd_mqd_stride;
		mqd->checkpoint_mqd = checkpoint_mqd;
		mqd->restore_mqd = restore_mqd;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_HIQ:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_hiq_load_mqd_kiq;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = destroy_hiq_mqd;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		mqd->check_preemption_failed = check_preemption_failed;
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_sdma_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
		mqd->is_occupied = kfd_is_occupied_sdma;
		mqd->checkpoint_mqd = checkpoint_mqd;
		mqd->restore_mqd = restore_mqd;
		mqd->mqd_size = sizeof(struct v11_sdma_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		/*
		 * Use the generic MQD allocation functions for SDMA MQDs
		 * when MES is enabled.
		 */
		if (dev->kfd->shared_resources.enable_mes) {
			mqd->allocate_mqd = allocate_mqd;
			mqd->free_mqd = kfd_free_mqd_cp;
		}
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}