// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v9_structs.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"

static void update_mqd(struct mqd_manager *mm, void *mqd,
		       struct queue_properties *q,
		       struct mqd_update_info *minfo);

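/*
 * Size of one queue's MQD allocation, also used as the per-XCC MQD
 * stride on multi-XCC parts. With CWSR enabled, a compute queue's
 * control stack lives in the same buffer object as its MQD, so the
 * stride must cover both; otherwise the bare MQD size is enough.
 */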
static uint64_t mqd_stride_v9(struct mqd_manager *mm,
			      struct queue_properties *q)
{
	if (mm->dev->kfd->cwsr_enabled &&
	    q->type == KFD_QUEUE_TYPE_COMPUTE) {

		/* On gfxv9, the MQD resides in the first 4K page,
		 * followed by the control stack. Align both to
		 * AMDGPU_GPU_PAGE_SIZE to maintain the required 4K boundary.
		 */

		return ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) +
			     ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE),
			     PAGE_SIZE);
	}

	return mm->mqd_size;
}

static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

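/*
 * Apply a user-supplied CU mask to the MQD, distributed symmetrically
 * across the shader engines. GC 9.4.3/9.4.4/9.5.0 program only SE0-SE3
 * per XCC instance; all other gfx9 parts program all eight SE masks.
 */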
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			   struct mqd_update_info *minfo, uint32_t inst)
{
	struct v9_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};

	if (!minfo || !minfo->cu_mask.ptr)
		return;

	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);

	m = get_mqd(mqd);

	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
	    KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
	    KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0)) {
		m->compute_static_thread_mgmt_se4 = se_mask[4];
		m->compute_static_thread_mgmt_se5 = se_mask[5];
		m->compute_static_thread_mgmt_se6 = se_mask[6];
		m->compute_static_thread_mgmt_se7 = se_mask[7];

		pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
			m->compute_static_thread_mgmt_se0,
			m->compute_static_thread_mgmt_se1,
			m->compute_static_thread_mgmt_se2,
			m->compute_static_thread_mgmt_se3,
			m->compute_static_thread_mgmt_se4,
			m->compute_static_thread_mgmt_se5,
			m->compute_static_thread_mgmt_se6,
			m->compute_static_thread_mgmt_se7);
	} else {
		pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n",
			inst, m->compute_static_thread_mgmt_se0,
			m->compute_static_thread_mgmt_se1,
			m->compute_static_thread_mgmt_se2,
			m->compute_static_thread_mgmt_se3);
	}
}

static void set_priority(struct v9_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}

static bool mqd_on_vram(struct amdgpu_device *adev)
{
	if (adev->apu_prefer_gtt)
		return false;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 5, 0):
		return true;
	default:
		return false;
	}
}

static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
					struct queue_properties *q)
{
	int retval;
	struct kfd_node *node = mm->dev;
	struct kfd_mem_obj *mqd_mem_obj = NULL;

	/* For V9 only, due to a HW bug, the control stack of a user mode
	 * compute queue needs to be allocated just behind the page boundary
	 * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
	 * the first page of the buffer serves the regular MQD buffer
	 * purpose and the remainder is for the control stack. Although the
	 * two parts are in the same buffer object, they need different
	 * memory types: the MQD part needs UC (uncached) as usual, while
	 * the control stack needs NC (non-coherent), unlike the UC type
	 * used when the control stack is allocated in user space.
	 *
	 * Because of all this, we use the GTT allocation function instead
	 * of the sub-allocation function for this enlarged MQD buffer. To
	 * achieve two memory types in a single buffer object, we pass the
	 * special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct the
	 * amdgpu memory functions to do so.
	 */
	if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
		mqd_mem_obj = kzalloc_obj(struct kfd_mem_obj);
		if (!mqd_mem_obj)
			return NULL;
		retval = amdgpu_amdkfd_alloc_kernel_mem(node->adev,
			ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) +
			      ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE),
			      PAGE_SIZE) *
			NUM_XCC(node->xcc_mask),
			mqd_on_vram(node->adev) ? AMDGPU_GEM_DOMAIN_VRAM :
						  AMDGPU_GEM_DOMAIN_GTT,
			&(mqd_mem_obj->mem),
			&(mqd_mem_obj->gpu_addr),
			(void *)&(mqd_mem_obj->cpu_ptr), true);

		if (retval) {
			kfree(mqd_mem_obj);
			return NULL;
		}
	} else {
		retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd),
				&mqd_mem_obj);
		if (retval)
			return NULL;
	}

	return mqd_mem_obj;
}

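/* Initialize a gfx9 compute MQD with KFD's default HQD programming. */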
static void init_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	uint64_t addr;
	struct v9_mqd *m;

	m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memset(m, 0, sizeof(struct v9_mqd));

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;

	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	/* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
	 * DISPATCH_PTR. This is required for the kfd debugger.
	 */
	m->cp_hqd_hq_status0 = 1 << 14;

	if (q->format == KFD_QUEUE_FORMAT_AQL)
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;

	if (q->tba_addr) {
		m->compute_pgm_rsrc2 |=
			(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
	}

	if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	update_mqd(mm, m, q, NULL);
}

static int load_mqd(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, 0, mms, 0);
}

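/*
 * Refresh the MQD from the current queue properties: ring base and
 * size, rptr/wptr addresses, doorbell, EOP buffer, CU mask and
 * priority. Called at queue creation and on every property update.
 */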
static void update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v9_mqd *m;

	m = get_mqd(mqd);

	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control =
		3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
		1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;

	/*
	 * HW does not clamp this field correctly. The maximum EOP queue size
	 * is constrained by the per-SE EOP done signal count, which is 8-bit.
	 * The limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1), so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 *
	 * Also, do the calculation only if EOP is used (size > 0); otherwise
	 * the order_base_2 calculation gives an incorrect result.
	 */
	m->cp_hqd_eop_control = q->eop_ring_buffer_size ?
		min(0xA, order_base_2(q->eop_ring_buffer_size / 4) - 1) : 0;

	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
				1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |= 1 <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
		m->cp_hqd_ctx_save_control = 0;

	if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
	    KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
	    KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0))
		update_cu_mask(mm, mqd, minfo, 0);
	set_priority(m, q);

	if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
		if (minfo->update_flag & UPDATE_FLAG_IS_GWS)
			m->compute_resource_limits |=
				COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
		else
			m->compute_resource_limits &=
				~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
	}

	q->is_active = QUEUE_IS_ACTIVE(*q);
}
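/*
 * After a preemption request, a non-zero queue_doorbell_id0 left in
 * the HIQ MQD indicates the firmware did not dequeue the queue, i.e.
 * the preemption failed. The field is cleared for the next check.
 */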
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
	struct v9_mqd *m = (struct v9_mqd *)mqd;
	uint32_t doorbell_id = m->queue_doorbell_id0;

	m->queue_doorbell_id0 = 0;

	return kfd_check_hiq_mqd_doorbell_id(mm->dev, doorbell_id, 0);
}

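/*
 * Copy a kfd_context_save_area_header and the saved control stack out
 * to user space so tools can inspect the preempted wave state.
 */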
static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  struct queue_properties *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v9_mqd *m;
	struct kfd_context_save_area_header header;

	/* Control stack is located one page after MQD. */
	void *mqd_ctl_stack = (void *)((uintptr_t)mqd + AMDGPU_GPU_PAGE_SIZE);

	m = get_mqd(mqd);

	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	header.wave_state.control_stack_size = *ctl_stack_used_size;
	header.wave_state.wave_state_size = *save_area_used_size;

	header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
	header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;

	if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
		return -EFAULT;

	if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset,
				mqd_ctl_stack + m->cp_hqd_cntl_stack_offset,
				*ctl_stack_used_size))
		return -EFAULT;

	return 0;
}

static int get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
	struct v9_mqd *m = get_mqd(mqd);

	if (check_mul_overflow(m->cp_hqd_cntl_stack_size,
			       NUM_XCC(mm->dev->xcc_mask), ctl_stack_size))
		return -EINVAL;

	return 0;
}

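/*
 * Checkpoint/restore helpers (used for CRIU): the MQD and its control
 * stack are copied to and from caller-provided buffers.
 */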
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
	struct v9_mqd *m;
	/* Control stack is located one page after MQD. */
	void *ctl_stack = (void *)((uintptr_t)mqd + AMDGPU_GPU_PAGE_SIZE);

	m = get_mqd(mqd);

	memcpy(mqd_dst, m, sizeof(struct v9_mqd));
	memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}

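/* Multi-XCC variant: checkpoint one MQD and control stack per XCC. */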
static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm,
				  void *mqd,
				  void *mqd_dst,
				  void *ctl_stack_dst)
{
	struct v9_mqd *m;
	int xcc;
	uint64_t size = get_mqd(mqd)->cp_mqd_stride_size;

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		m = get_mqd(mqd + size * xcc);

		checkpoint_mqd(mm, m,
			       (uint8_t *)mqd_dst + sizeof(*m) * xcc,
			       (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc);
	}
}

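/*
 * Rebuild an MQD from checkpointed data. Only the doorbell offset is
 * re-derived from the new queue properties; the queue is restored in
 * the inactive state.
 */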
static void restore_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, u32 ctl_stack_size)
{
	uint64_t addr;
	struct v9_mqd *m;
	void *ctl_stack;

	m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	/* Control stack is located one page after MQD. */
	ctl_stack = (void *)((uintptr_t)*mqd + AMDGPU_GPU_PAGE_SIZE);
	memcpy(ctl_stack, ctl_stack_src, ctl_stack_size);

	m->cp_hqd_pq_doorbell_control =
		qp->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	qp->is_active = 0;
}

static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	int err;
	struct v9_mqd *m;
	u32 doorbell_off;

	m = get_mqd(mqd);

	doorbell_off = m->cp_hqd_pq_doorbell_control >>
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
	if (err)
		pr_debug("Destroy HIQ MQD failed: %d\n", err);

	return err;
}

static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v9_sdma_mqd *m;

	m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	memset(m, 0, sizeof(struct v9_sdma_mqd));

	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v9_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
		<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;
	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
	/* Allow context switch so we don't cross-process starve with a massive
	 * command buffer of long-running SDMA commands.
	 */
	m->sdmax_rlcx_ib_cntl |= SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

static void checkpoint_mqd_sdma(struct mqd_manager *mm,
				void *mqd,
				void *mqd_dst,
				void *ctl_stack_dst)
{
	struct v9_sdma_mqd *m;

	m = get_sdma_mqd(mqd);

	memcpy(mqd_dst, m, sizeof(struct v9_sdma_mqd));
}

static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, const u32 ctl_stack_size)
{
	uint64_t addr;
	struct v9_sdma_mqd *m;

	m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	m->sdmax_rlcx_doorbell_offset =
		qp->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	qp->is_active = 0;
}

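/*
 * HIQ init for multi-XCC parts: one HIQ MQD per XCC sharing the same
 * queue properties; only XCC0, the master, updates the read pointer.
 */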
static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;
	int xcc = 0;
	struct kfd_mem_obj xcc_mqd_mem_obj;
	uint64_t xcc_gart_addr = 0;

	memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc);

		init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);

		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
					1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
					1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
		if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
			m->cp_hqd_pq_doorbell_control |= 1 <<
				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
		m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
		if (xcc == 0) {
			/* Set no_update_rptr = 0 in Master XCC */
			m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;

			/* Set the MQD pointer and gart address to XCC0 MQD */
			*mqd = m;
			*gart_addr = xcc_gart_addr;
		}
	}
}

static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err = 0, inst = 0;
	void *xcc_mqd;
	uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + hiq_mqd_size * inst;
		err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd,
						     pipe_id, queue_id,
						     p->doorbell_off, xcc_id);
		if (err) {
			pr_debug("Failed to load HIQ MQD for XCC: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err = 0, inst = 0;
	uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
	struct v9_mqd *m;
	u32 doorbell_off;

	for_each_inst(xcc_id, xcc_mask) {
		m = get_mqd(mqd + hiq_mqd_size * inst);

		doorbell_off = m->cp_hqd_pq_doorbell_control >>
				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;

		err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, xcc_id);
		if (err) {
			pr_debug("Destroy HIQ MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd)
{
	uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int inst = 0, xcc_id;
	struct v9_mqd *m;
	bool ret = false;

	for_each_inst(xcc_id, xcc_mask) {
		m = get_mqd(mqd + hiq_mqd_size * inst);
		ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev,
					m->queue_doorbell_id0, inst);
		m->queue_doorbell_id0 = 0;
		++inst;
	}

	return ret;
}

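/*
 * Build a per-XCC view into the shared MQD buffer object at the given
 * byte offset; only the XCC0 view carries the memory handle.
 */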
static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj,
			struct kfd_mem_obj *xcc_mqd_mem_obj,
			uint64_t offset)
{
	xcc_mqd_mem_obj->mem = (offset == 0) ?
				mqd_mem_obj->mem : NULL;
	xcc_mqd_mem_obj->gpu_addr = mqd_mem_obj->gpu_addr + offset;
	xcc_mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)mqd_mem_obj->cpu_ptr
					+ offset);
}

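/*
 * Compute MQD init for multi-XCC parts: initialize one MQD per XCC at
 * the stride returned by mqd_stride(), stagger the starting logical
 * XCC for AQL queues and point each XCC at its own CWSR save area.
 */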
static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;
	int xcc = 0;
	struct kfd_mem_obj xcc_mqd_mem_obj;
	uint64_t xcc_gart_addr = 0;
	uint64_t xcc_ctx_save_restore_area_address;
	uint64_t offset = mm->mqd_stride(mm, q);
	uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;

	memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc);

		init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
		if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
			m->cp_hqd_pq_doorbell_control |= 1 <<
				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
		m->cp_mqd_stride_size = offset;

		/*
		 * Update the CWSR address for each XCC if CWSR is enabled
		 * and the CWSR area is allocated in thunk
		 */
		if (mm->dev->kfd->cwsr_enabled &&
		    q->ctx_save_restore_area_address) {
			xcc_ctx_save_restore_area_address =
				q->ctx_save_restore_area_address +
				(xcc * q->ctx_save_restore_area_size);

			m->cp_hqd_ctx_save_base_addr_lo =
				lower_32_bits(xcc_ctx_save_restore_area_address);
			m->cp_hqd_ctx_save_base_addr_hi =
				upper_32_bits(xcc_ctx_save_restore_area_address);
		}

		if (q->format == KFD_QUEUE_FORMAT_AQL) {
			m->compute_tg_chunk_size = 1;
			m->compute_current_logic_xcc_id =
					(local_xcc_start + xcc) %
					NUM_XCC(mm->dev->xcc_mask);

			switch (xcc) {
			case 0:
				/* Master XCC */
				m->cp_hqd_pq_control &=
					~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
				break;
			default:
				break;
			}
		} else {
			/* PM4 Queue */
			m->compute_current_logic_xcc_id = 0;
			m->compute_tg_chunk_size = 0;
			m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
		}

		if (xcc == 0) {
			/* Set the MQD pointer and gart address to XCC0 MQD */
			*mqd = m;
			*gart_addr = xcc_gart_addr;
		}
	}

	if (mqd_on_vram(mm->dev->adev))
		amdgpu_device_flush_hdp(mm->dev->adev, NULL);
}

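/* Multi-XCC update: run update_mqd() plus per-XCC AQL/PM4 fixups. */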
static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
		      struct queue_properties *q, struct mqd_update_info *minfo)
{
	struct v9_mqd *m;
	int xcc = 0;
	uint64_t size = mm->mqd_stride(mm, q);

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		m = get_mqd(mqd + size * xcc);
		update_mqd(mm, m, q, minfo);

		if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
			m->cp_hqd_pq_doorbell_control |= 1 <<
				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
		update_cu_mask(mm, m, minfo, xcc);

		if (q->format == KFD_QUEUE_FORMAT_AQL) {
			switch (xcc) {
			case 0:
				/* Master XCC */
				m->cp_hqd_pq_control &=
					~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
				break;
			default:
				break;
			}
			m->compute_tg_chunk_size = 1;
		} else {
			/* PM4 Queue */
			m->compute_current_logic_xcc_id = 0;
			m->compute_tg_chunk_size = 0;
			m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
		}
	}

	if (mqd_on_vram(mm->dev->adev))
		amdgpu_device_flush_hdp(mm->dev->adev, NULL);
}

static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, u32 ctl_stack_size)
{
	struct kfd_mem_obj xcc_mqd_mem_obj;
	u32 mqd_ctl_stack_size;
	struct v9_mqd *m;
	u32 num_xcc;
	int xcc;

	uint64_t offset = mm->mqd_stride(mm, qp);

	mm->dev->dqm->current_logical_xcc_start++;

	num_xcc = NUM_XCC(mm->dev->xcc_mask);
	mqd_ctl_stack_size = ctl_stack_size / num_xcc;

	memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));

	/* Set the MQD pointer and gart address to XCC0 MQD */
	*mqd = mqd_mem_obj->cpu_ptr;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	for (xcc = 0; xcc < num_xcc; xcc++) {
		get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc);
		restore_mqd(mm, (void **)&m,
			    &xcc_mqd_mem_obj,
			    NULL,
			    qp,
			    (uint8_t *)mqd_src + xcc * sizeof(*m),
			    (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size,
			    mqd_ctl_stack_size);
	}

	if (mqd_on_vram(mm->dev->adev))
		amdgpu_device_flush_hdp(mm->dev->adev, NULL);
}
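
/* Multi-XCC destroy: tear down the HQD on every XCC instance. */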
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err = 0, inst = 0;
	void *xcc_mqd;
	struct v9_mqd *m;
	uint64_t mqd_offset;

	m = get_mqd(mqd);
	mqd_offset = m->cp_mqd_stride_size;

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + mqd_offset * inst;
		err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
						    type, timeout, pipe_id,
						    queue_id, xcc_id);
		if (err) {
			pr_debug("Destroy MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err = 0, inst = 0;
	void *xcc_mqd;
	uint64_t mqd_stride_size = mm->mqd_stride(mm, p);

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + mqd_stride_size * inst;
		err = mm->dev->kfd2kgd->hqd_load(
			mm->dev->adev, xcc_mqd, pipe_id, queue_id,
			(uint32_t __user *)p->write_ptr, wptr_shift, 0, mms,
			xcc_id);
		if (err) {
			pr_debug("Load MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd,
				 struct queue_properties *q,
				 void __user *ctl_stack,
				 u32 *ctl_stack_used_size,
				 u32 *save_area_used_size)
{
	int xcc, err = 0;
	void *xcc_mqd;
	void __user *xcc_ctl_stack;
	uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
	u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0;

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		xcc_mqd = mqd + mqd_stride_size * xcc;
		xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack +
					q->ctx_save_restore_area_size * xcc);

		err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
				     &tmp_ctl_stack_used_size,
				     &tmp_save_area_used_size);
		if (err)
			break;

		/*
		 * Report the ctl_stack_used_size and save_area_used_size of
		 * XCC 0 when passing the info to user space. For multiple
		 * XCCs, user space has to look at the header of each control
		 * stack area to determine the control stack size and save
		 * area used.
		 */
		if (xcc == 0) {
			*ctl_stack_used_size = tmp_ctl_stack_used_size;
			*save_area_used_size = tmp_save_area_used_size;
		}
	}

	return err;
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v9_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v9_sdma_mqd), false);
	return 0;
}

#endif

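/*
 * Factory: select the per-queue-type callbacks, using the *_v9_4_3
 * variants on GC 9.4.3/9.4.4/9.5.0, where a queue spans all XCCs.
 */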
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_node *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc_obj(*mqd);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		mqd->allocate_mqd = allocate_mqd;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->get_checkpoint_info = get_checkpoint_info;
		mqd->mqd_size = sizeof(struct v9_mqd);
		mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
		    KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
		    KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) {
			mqd->init_mqd = init_mqd_v9_4_3;
			mqd->load_mqd = load_mqd_v9_4_3;
			mqd->update_mqd = update_mqd_v9_4_3;
			mqd->destroy_mqd = destroy_mqd_v9_4_3;
			mqd->get_wave_state = get_wave_state_v9_4_3;
			mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3;
			mqd->restore_mqd = restore_mqd_v9_4_3;
		} else {
			mqd->init_mqd = init_mqd;
			mqd->load_mqd = load_mqd;
			mqd->update_mqd = update_mqd;
			mqd->destroy_mqd = kfd_destroy_mqd_cp;
			mqd->get_wave_state = get_wave_state;
			mqd->checkpoint_mqd = checkpoint_mqd;
			mqd->restore_mqd = restore_mqd;
		}
		break;
	case KFD_MQD_TYPE_HIQ:
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->update_mqd = update_mqd;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v9_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
		    KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
		    KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) {
			mqd->init_mqd = init_mqd_hiq_v9_4_3;
			mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
			mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
			mqd->check_preemption_failed = check_preemption_failed_v9_4_3;
		} else {
			mqd->init_mqd = init_mqd_hiq;
			mqd->load_mqd = kfd_hiq_load_mqd_kiq;
			mqd->destroy_mqd = destroy_hiq_mqd;
			mqd->check_preemption_failed = check_preemption_failed;
		}
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		mqd->allocate_mqd = allocate_sdma_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
		mqd->is_occupied = kfd_is_occupied_sdma;
		mqd->checkpoint_mqd = checkpoint_mqd_sdma;
		mqd->restore_mqd = restore_mqd_sdma;
		mqd->mqd_size = sizeof(struct v9_sdma_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}