/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v11_structs.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "amdgpu_amdkfd.h"

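/*
 * MQD (memory queue descriptor) manager for GFX11 (GC 11.x) compute and
 * SDMA queues. An MQD holds the persistent state of one hardware queue in
 * GTT memory; the firmware reads it when the queue is mapped onto a
 * hardware queue slot and writes state back when the queue is preempted.
 */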
static inline struct v11_compute_mqd *get_mqd(void *mqd)
{
	return (struct v11_compute_mqd *)mqd;
}

static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v11_sdma_mqd *)mqd;
}

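/*
 * Apply a per-queue CU mask, or the debugger workaround mask, to the
 * per-shader-engine static thread management fields of the MQD.
 * mqd_symmetrically_map_cu_mask() distributes the user-supplied CU mask
 * across the shader engines. While the debug workaround is enabled, only
 * the lower 16 CU bits of each SE are left active (0xffff); disabling the
 * workaround restores the full mask (0xffffffff).
 */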
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			   struct mqd_update_info *minfo)
{
	struct v11_compute_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
	bool has_wa_flag = minfo && (minfo->update_flag & (UPDATE_FLAG_DBG_WA_ENABLE |
			UPDATE_FLAG_DBG_WA_DISABLE));

	if (!minfo || !(has_wa_flag || minfo->cu_mask.ptr))
		return;

	m = get_mqd(mqd);

	if (has_wa_flag) {
		uint32_t wa_mask =
			(minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff;

		m->compute_static_thread_mgmt_se0 = wa_mask;
		m->compute_static_thread_mgmt_se1 = wa_mask;
		m->compute_static_thread_mgmt_se2 = wa_mask;
		m->compute_static_thread_mgmt_se3 = wa_mask;
		m->compute_static_thread_mgmt_se4 = wa_mask;
		m->compute_static_thread_mgmt_se5 = wa_mask;
		m->compute_static_thread_mgmt_se6 = wa_mask;
		m->compute_static_thread_mgmt_se7 = wa_mask;

		return;
	}

	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);

	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	m->compute_static_thread_mgmt_se4 = se_mask[4];
	m->compute_static_thread_mgmt_se5 = se_mask[5];
	m->compute_static_thread_mgmt_se6 = se_mask[6];
	m->compute_static_thread_mgmt_se7 = se_mask[7];

	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3,
		m->compute_static_thread_mgmt_se4,
		m->compute_static_thread_mgmt_se5,
		m->compute_static_thread_mgmt_se6,
		m->compute_static_thread_mgmt_se7);
}

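/*
 * pipe_priority_map (shared across the MQD managers) folds the 16-level
 * KFD queue priority into the coarser low/medium/high CP pipe priority.
 */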
static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}

static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
		struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj;
	int size;

	/*
	 * MES writes to areas beyond the MQD struct, so allocate a full
	 * page for the MQD when MES is enabled.
	 */
	if (node->kfd->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_compute_mqd);

	if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj))
		return NULL;

	return mqd_mem_obj;
}

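/*
 * First-time initialization of a compute MQD. The header value 0xC0310800
 * appears to be the MQD format signature expected by the CP firmware
 * (assumption: it is not documented in-tree, but the same value is used by
 * other GFX MQD managers). The remaining fields are programmed from the
 * queue properties and then refined by update_mqd() at the end.
 */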
static void init_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	uint64_t addr;
	struct v11_compute_mqd *m;
	int size;
	uint32_t wa_mask = q->is_dbg_wa ? 0xffff : 0xffffffff;

	m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	/* MES writes beyond the MQD struct; zero the full page in that case. */
	if (mm->dev->kfd->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_compute_mqd);

	memset(m, 0, size);

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;

	m->compute_static_thread_mgmt_se0 = wa_mask;
	m->compute_static_thread_mgmt_se1 = wa_mask;
	m->compute_static_thread_mgmt_se2 = wa_mask;
	m->compute_static_thread_mgmt_se3 = wa_mask;
	m->compute_static_thread_mgmt_se4 = wa_mask;
	m->compute_static_thread_mgmt_se5 = wa_mask;
	m->compute_static_thread_mgmt_se6 = wa_mask;
	m->compute_static_thread_mgmt_se7 = wa_mask;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	/* Set cp_hqd_hq_status0 bit 14 to 1 to have the CP set up the
	 * DISPATCH_PTR.  This is required for the kfd debugger.
	 */
	m->cp_hqd_hq_status0 = 1 << 14;

	/*
	 * GFX11 RS64 CP firmware version >= 509 supports acknowledgment of
	 * PCIe atomics support.
	 */
	if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
		m->cp_hqd_hq_status0 |= 1 << 29;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
	}

	if (mm->dev->kfd->cwsr_enabled) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	mm->update_mqd(mm, m, q, NULL);
}

static int load_mqd(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	int r = 0;
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords.
	 * A 64-byte AQL packet is 16 dwords, hence the shift by 4 to convert
	 * a packet count into a dword count.
	 */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, 0, mms, 0);
	return r;
}

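/*
 * Program ring-buffer addresses, doorbell and size fields from the queue
 * properties. The QUEUE_SIZE field is encoded as log2 of the ring size in
 * dwords, minus 1: e.g. a 4 KiB ring is 1024 dwords, so
 * ffs(1024) - 1 - 1 = 9, which the hardware presumably decodes back as
 * 2^(9+1) = 1024 dwords.
 */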
static void update_mqd(struct mqd_manager *mm, void *mqd,
		       struct queue_properties *q,
		       struct mqd_update_info *minfo)
{
	struct v11_compute_mqd *m;

	m = get_mqd(mqd);

	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
	m->cp_hqd_pq_control |=
			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1) so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 */
	m->cp_hqd_eop_control = min(0xA,
		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		/* GC 10 removed WPP_CLAMP from PQ Control */
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |=
			1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->kfd->cwsr_enabled)
		m->cp_hqd_ctx_save_control = 0;

	update_cu_mask(mm, mqd, minfo);
	set_priority(m, q);

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

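/*
 * A stale doorbell id left in the HIQ MQD indicates that a queue did not
 * preempt (presumably the CP clears queue_doorbell_id0 only on a
 * successful unmap); kfd_check_hiq_mqd_doorbell_id() logs and reports it.
 */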
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
	struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd;

	return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}

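/*
 * Save area layout implied by the offsets used below:
 *
 *   [cp_hqd_cntl_stack_offset .. cp_hqd_cntl_stack_size)  control stack,
 *       written downwards from cp_hqd_cntl_stack_size
 *   [cp_hqd_cntl_stack_size .. cp_hqd_wg_state_offset)    workgroup state,
 *       written upwards from cp_hqd_cntl_stack_size
 */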
static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  struct queue_properties *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v11_compute_mqd *m;
	struct kfd_context_save_area_header header;

	m = get_mqd(mqd);

	/* The control stack is written backwards, while workgroup context
	 * data is written forwards. Both start from m->cp_hqd_cntl_stack_size.
	 * The current positions are m->cp_hqd_cntl_stack_offset and
	 * m->cp_hqd_wg_state_offset, respectively.
	 */
	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	/* The control stack is not copied to user mode for GFXv11 because
	 * it is part of the context save area that is already accessible
	 * to user mode.
	 */
	header.wave_state.control_stack_size = *ctl_stack_used_size;
	header.wave_state.wave_state_size = *save_area_used_size;

	header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
	header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;

	if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
		return -EFAULT;

	return 0;
}

static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
	struct v11_compute_mqd *m;

	m = get_mqd(mqd);

	/* The control stack lives in the user-mode context save area on
	 * GFXv11 (see get_wave_state() above), so ctl_stack_dst is unused
	 * and only the MQD itself is checkpointed.
	 */
	memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
}

static void restore_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, const u32 ctl_stack_size)
{
	uint64_t addr;
	struct v11_compute_mqd *m;

	m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	m->cp_hqd_pq_doorbell_control =
		qp->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	qp->is_active = 0;
}

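/*
 * The HIQ (hardware interface queue) is the kernel-owned queue used to
 * submit runlists and other packets to the hardware scheduler. Its MQD is
 * a regular compute MQD with the privileged-state and KMD-queue bits set
 * on top of the normal init_mqd() setup.
 */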
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v11_compute_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	int err;
	struct v11_compute_mqd *m;
	u32 doorbell_off;

	m = get_mqd(mqd);

	doorbell_off = m->cp_hqd_pq_doorbell_control >>
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;

	err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
	if (err)
		pr_debug("Destroy HIQ MQD failed: %d\n", err);

	return err;
}

static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v11_sdma_mqd *m;
	int size;

	m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	/* MES writes to areas beyond the MQD struct, so zero a full page
	 * when MES is enabled.
	 */
	if (mm->dev->kfd->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_sdma_mqd);

	memset(m, 0, size);
	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

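/*
 * The SDMA RB_SIZE field uses a log2-of-dwords encoding:
 * ffs(q->queue_size / 4) - 1, so a 4 KiB ring (1024 dwords) encodes as 10.
 * SDMA_RLC_DUMMY_DEFAULT (0xf) is the value programmed into
 * sdmax_rlcx_dummy_reg (assumption: a firmware-expected default carried
 * over from earlier SDMA generations).
 */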
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
		struct queue_properties *q,
		struct mqd_update_info *minfo)
{
	struct v11_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
		<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
		<< SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
		 & SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;
	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v11_compute_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v11_sdma_mqd), false);
	return 0;
}

#endif

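/*
 * Build the MQD manager ops table for one queue type: CP (user compute
 * queues), HIQ (kernel interface queue), DIQ (debug interface queue) or
 * SDMA. Note the MES override at the end of the SDMA case, which swaps in
 * the generic CP allocation path.
 */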
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
		struct kfd_node *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
		mqd->get_wave_state = get_wave_state;
		mqd->mqd_stride = kfd_mqd_stride;
		mqd->checkpoint_mqd = checkpoint_mqd;
		mqd->restore_mqd = restore_mqd;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_HIQ:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_hiq_load_mqd_kiq;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = destroy_hiq_mqd;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		mqd->check_preemption_failed = check_preemption_failed;
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_sdma_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
		mqd->is_occupied = kfd_is_occupied_sdma;
		mqd->checkpoint_mqd = checkpoint_mqd;
		mqd->restore_mqd = restore_mqd;
		mqd->mqd_size = sizeof(struct v11_sdma_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		/*
		 * Allocate SDMA MQDs with the generic functions
		 * when MES is enabled.
		 */
		if (dev->kfd->shared_resources.enable_mes) {
			mqd->allocate_mqd = allocate_mqd;
			mqd->free_mqd = kfd_free_mqd_cp;
		}
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}
557