// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v9_structs.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"

static void update_mqd(struct mqd_manager *mm, void *mqd,
		       struct queue_properties *q,
		       struct mqd_update_info *minfo);

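/*
 * Per-queue stride between consecutive MQD instances in an allocation. When
 * CWSR is enabled for a compute queue, each MQD is followed by its control
 * stack, so the stride covers one page-aligned control stack plus one
 * page-aligned MQD; otherwise it is just the MQD size.
 */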
static uint64_t mqd_stride_v9(struct mqd_manager *mm,
			      struct queue_properties *q)
{
	if (mm->dev->kfd->cwsr_enabled &&
	    q->type == KFD_QUEUE_TYPE_COMPUTE)
		return ALIGN(q->ctl_stack_size, PAGE_SIZE) +
			ALIGN(sizeof(struct v9_mqd), PAGE_SIZE);

	return mm->mqd_size;
}

static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

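/*
 * Apply the per-queue CU mask requested by user space: map it symmetrically
 * across shader engines and program the compute_static_thread_mgmt_se*
 * fields. On GC 9.4.3/9.4.4 the mask is applied per XCC instance and only
 * se0..se3 are programmed; other v9 parts program all eight SE masks.
 */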
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			   struct mqd_update_info *minfo, uint32_t inst)
{
	struct v9_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};

	if (!minfo || !minfo->cu_mask.ptr)
		return;

	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);

	m = get_mqd(mqd);

	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
	    KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4)) {
		m->compute_static_thread_mgmt_se4 = se_mask[4];
		m->compute_static_thread_mgmt_se5 = se_mask[5];
		m->compute_static_thread_mgmt_se6 = se_mask[6];
		m->compute_static_thread_mgmt_se7 = se_mask[7];

		pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
			m->compute_static_thread_mgmt_se0,
			m->compute_static_thread_mgmt_se1,
			m->compute_static_thread_mgmt_se2,
			m->compute_static_thread_mgmt_se3,
			m->compute_static_thread_mgmt_se4,
			m->compute_static_thread_mgmt_se5,
			m->compute_static_thread_mgmt_se6,
			m->compute_static_thread_mgmt_se7);
	} else {
		pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n",
			inst, m->compute_static_thread_mgmt_se0,
			m->compute_static_thread_mgmt_se1,
			m->compute_static_thread_mgmt_se2,
			m->compute_static_thread_mgmt_se3);
	}
}

static void set_priority(struct v9_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}

static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
		struct queue_properties *q)
{
	int retval;
	struct kfd_mem_obj *mqd_mem_obj = NULL;

	/* For V9 only, due to a HW bug, the control stack of a user mode
	 * compute queue needs to be allocated just behind the page boundary
	 * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
	 * the first page of the buffer serves as the regular MQD buffer and
	 * the remainder is for the control stack. Although the two parts are
	 * in the same buffer object, they need different memory types: the
	 * MQD part needs UC (uncached) as usual, while the control stack
	 * needs NC (non-coherent), which differs from the UC type used when
	 * the control stack is allocated in user space.
	 *
	 * Because of all this, we use the GTT allocation function instead of
	 * the sub-allocation function for this enlarged MQD buffer. Moreover,
	 * in order to achieve two memory types in a single buffer object, we
	 * pass the special BO flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct
	 * the amdgpu memory functions to do so.
	 */
	if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
		mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
		if (!mqd_mem_obj)
			return NULL;
		retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev,
			(ALIGN(q->ctl_stack_size, PAGE_SIZE) +
			ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) *
			NUM_XCC(node->xcc_mask),
			&(mqd_mem_obj->gtt_mem),
			&(mqd_mem_obj->gpu_addr),
			(void *)&(mqd_mem_obj->cpu_ptr), true);

		if (retval) {
			kfree(mqd_mem_obj);
			return NULL;
		}
	} else {
		retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd),
				&mqd_mem_obj);
		if (retval)
			return NULL;
	}

	return mqd_mem_obj;
}

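/*
 * Initialize a compute MQD with the v9 defaults: enable all CUs, program the
 * MQD GART address, quantum and AQL/trap-handler bits, and set up the CWSR
 * save area registers when a context save/restore area is provided. Finishes
 * by calling update_mqd() to program the queue-specific fields.
 */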
static void init_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	uint64_t addr;
	struct v9_mqd *m;

	m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memset(m, 0, sizeof(struct v9_mqd));

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	/* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
	 * DISPATCH_PTR. This is required for the kfd debugger.
	 */
	m->cp_hqd_hq_status0 = 1 << 14;

	if (q->format == KFD_QUEUE_FORMAT_AQL)
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;

	if (q->tba_addr) {
		m->compute_pgm_rsrc2 |=
			(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
	}

	if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	update_mqd(mm, m, q, NULL);
}

static int load_mqd(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, 0, mms, 0);
}

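/*
 * Program the queue-specific HQD registers from the queue properties: ring
 * buffer base and size, read/write pointer addresses, doorbell offset, EOP
 * buffer, VMID, AQL-specific control bits, priority and CU mask. Called on
 * every queue property update and at the end of init_mqd().
 */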
static void update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v9_mqd *m;

	m = get_mqd(mqd);

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control =
		3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
		1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1), so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 *
	 * Also, only do the calculation if the EOP queue is used (size > 0);
	 * otherwise the order_base_2 calculation yields an incorrect result.
	 */
	m->cp_hqd_eop_control = q->eop_ring_buffer_size ?
		min(0xA, order_base_2(q->eop_ring_buffer_size / 4) - 1) : 0;

	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
				1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |= 1 <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
		m->cp_hqd_ctx_save_control = 0;

	if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
	    KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4))
		update_cu_mask(mm, mqd, minfo, 0);
	set_priority(m, q);

	if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
		if (minfo->update_flag & UPDATE_FLAG_IS_GWS)
			m->compute_resource_limits |=
				COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
		else
			m->compute_resource_limits &=
				~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
	}

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

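/*
 * Read and clear queue_doorbell_id0 from the HIQ MQD and let
 * kfd_check_hiq_mqd_doorbell_id() report whether a preemption request left
 * the doorbell ID behind, i.e. whether the preemption failed.
 */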
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
	struct v9_mqd *m = (struct v9_mqd *)mqd;
	uint32_t doorbell_id = m->queue_doorbell_id0;

	m->queue_doorbell_id0 = 0;

	return kfd_check_hiq_mqd_doorbell_id(mm->dev, doorbell_id, 0);
}

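/*
 * Copy the wave state of a CWSR-enabled queue to user space: fill in a
 * kfd_context_save_area_header describing the control stack and wave save
 * area, then copy the used portion of the control stack, which lives one
 * page after the MQD in the same buffer.
 */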
static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  struct queue_properties *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v9_mqd *m;
	struct kfd_context_save_area_header header;

	/* Control stack is located one page after MQD. */
	void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);

	m = get_mqd(mqd);

	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	header.wave_state.control_stack_size = *ctl_stack_used_size;
	header.wave_state.wave_state_size = *save_area_used_size;

	header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
	header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;

	if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
		return -EFAULT;

	if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset,
				mqd_ctl_stack + m->cp_hqd_cntl_stack_offset,
				*ctl_stack_used_size))
		return -EFAULT;

	return 0;
}

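/*
 * Checkpoint/restore helpers (used for CRIU support): get_checkpoint_info()
 * reports the control stack size, checkpoint_mqd() copies the MQD and its
 * control stack into the destination buffers, and restore_mqd() rebuilds an
 * MQD from a checkpointed image and reprograms the doorbell offset.
 */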
static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
	struct v9_mqd *m = get_mqd(mqd);

	*ctl_stack_size = m->cp_hqd_cntl_stack_size;
}

static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
	struct v9_mqd *m;
	/* Control stack is located one page after MQD. */
	void *ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);

	m = get_mqd(mqd);

	memcpy(mqd_dst, m, sizeof(struct v9_mqd));
	memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}

static void restore_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, u32 ctl_stack_size)
{
	uint64_t addr;
	struct v9_mqd *m;
	void *ctl_stack;

	m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	/* Control stack is located one page after MQD. */
	ctl_stack = (void *)((uintptr_t)*mqd + PAGE_SIZE);
	memcpy(ctl_stack, ctl_stack_src, ctl_stack_size);

	m->cp_hqd_pq_doorbell_control =
		qp->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	qp->is_active = 0;
}

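/*
 * The HIQ reuses the compute MQD layout but is a privileged, kernel-owned
 * queue, so it is marked as such in cp_hqd_pq_control. destroy_hiq_mqd()
 * tears it down by asking the HW scheduler to unmap the queue identified by
 * its doorbell offset.
 */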
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	int err;
	struct v9_mqd *m;
	u32 doorbell_off;

	m = get_mqd(mqd);

	doorbell_off = m->cp_hqd_pq_doorbell_control >>
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
	if (err)
		pr_debug("Destroy HIQ MQD failed: %d\n", err);

	return err;
}

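/*
 * SDMA queues use the v9_sdma_mqd layout. init_mqd_sdma() zeroes the MQD and
 * update_mqd_sdma() programs the RLC ring buffer registers (size, VMID,
 * read-pointer writeback, doorbell offset) plus the engine and queue IDs.
 * The SDMA checkpoint/restore helpers copy only the MQD; SDMA queues have no
 * control stack.
 */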
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v9_sdma_mqd *m;

	m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	memset(m, 0, sizeof(struct v9_sdma_mqd));

	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v9_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
		<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;
	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

static void checkpoint_mqd_sdma(struct mqd_manager *mm,
				void *mqd,
				void *mqd_dst,
				void *ctl_stack_dst)
{
	struct v9_sdma_mqd *m;

	m = get_sdma_mqd(mqd);

	memcpy(mqd_dst, m, sizeof(struct v9_sdma_mqd));
}

static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
			     struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			     struct queue_properties *qp,
			     const void *mqd_src,
			     const void *ctl_stack_src, const u32 ctl_stack_size)
{
	uint64_t addr;
	struct v9_sdma_mqd *m;

	m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	m->sdmax_rlcx_doorbell_offset =
		qp->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	qp->is_active = 0;
}

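/*
 * GC 9.4.3/9.4.4 devices expose multiple XCCs, each of which needs its own
 * copy of the HIQ MQD, laid out back to back at kfd_hiq_mqd_stride()
 * intervals. The *_v9_4_3 HIQ variants below initialize, load, destroy and
 * check every per-XCC instance; XCC0 acts as the master instance whose MQD
 * pointer and GART address are returned to the caller.
 */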
static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;
	int xcc = 0;
	struct kfd_mem_obj xcc_mqd_mem_obj;
	uint64_t xcc_gart_addr = 0;

	memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc);

		init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);

		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
					1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
					1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
		if (amdgpu_sriov_vf(mm->dev->adev))
			m->cp_hqd_pq_doorbell_control |= 1 <<
				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
		m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
		if (xcc == 0) {
			/* Set no_update_rptr = 0 in Master XCC */
			m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;

			/* Set the MQD pointer and gart address to XCC0 MQD */
			*mqd = m;
			*gart_addr = xcc_gart_addr;
		}
	}
}

static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err, inst = 0;
	void *xcc_mqd;
	uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + hiq_mqd_size * inst;
		err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd,
						     pipe_id, queue_id,
						     p->doorbell_off, xcc_id);
		if (err) {
			pr_debug("Failed to load HIQ MQD for XCC: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err, inst = 0;
	uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
	struct v9_mqd *m;
	u32 doorbell_off;

	for_each_inst(xcc_id, xcc_mask) {
		m = get_mqd(mqd + hiq_mqd_size * inst);

		doorbell_off = m->cp_hqd_pq_doorbell_control >>
			       CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;

		err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, xcc_id);
		if (err) {
			pr_debug("Destroy HIQ MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd)
{
	uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int inst = 0, xcc_id;
	struct v9_mqd *m;
	bool ret = false;

	for_each_inst(xcc_id, xcc_mask) {
		m = get_mqd(mqd + hiq_mqd_size * inst);
		ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev,
					m->queue_doorbell_id0, inst);
		m->queue_doorbell_id0 = 0;
		++inst;
	}

	return ret;
}

static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj,
			struct kfd_mem_obj *xcc_mqd_mem_obj,
			uint64_t offset)
{
	xcc_mqd_mem_obj->gtt_mem = (offset == 0) ?
					mqd_mem_obj->gtt_mem : NULL;
	xcc_mqd_mem_obj->gpu_addr = mqd_mem_obj->gpu_addr + offset;
	xcc_mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)mqd_mem_obj->cpu_ptr
					+ offset);
}

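/*
 * Multi-XCC compute queue handling: a user queue is backed by one MQD per
 * XCC, spaced mqd_stride() apart in the same allocation. The *_v9_4_3
 * helpers below replicate init/update/load/destroy/get_wave_state across all
 * XCC instances. For AQL queues only the master XCC (XCC0) updates the read
 * pointer, while PM4 queues are targeted at the XCC selected by
 * pm4_target_xcc.
 */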
static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;
	int xcc = 0;
	struct kfd_mem_obj xcc_mqd_mem_obj;
	uint64_t xcc_gart_addr = 0;
	uint64_t xcc_ctx_save_restore_area_address;
	uint64_t offset = mm->mqd_stride(mm, q);
	uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;

	memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc);

		init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);

		m->cp_mqd_stride_size = offset;

		/*
		 * Update the CWSR address for each XCC if CWSR is enabled
		 * and the CWSR area is allocated in the Thunk.
		 */
		if (mm->dev->kfd->cwsr_enabled &&
		    q->ctx_save_restore_area_address) {
			xcc_ctx_save_restore_area_address =
				q->ctx_save_restore_area_address +
				(xcc * q->ctx_save_restore_area_size);

			m->cp_hqd_ctx_save_base_addr_lo =
				lower_32_bits(xcc_ctx_save_restore_area_address);
			m->cp_hqd_ctx_save_base_addr_hi =
				upper_32_bits(xcc_ctx_save_restore_area_address);
		}

		if (q->format == KFD_QUEUE_FORMAT_AQL) {
			m->compute_tg_chunk_size = 1;
			m->compute_current_logic_xcc_id =
					(local_xcc_start + xcc) %
					NUM_XCC(mm->dev->xcc_mask);

			switch (xcc) {
			case 0:
				/* Master XCC */
				m->cp_hqd_pq_control &=
					~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
				break;
			default:
				break;
			}
		} else {
			/* PM4 Queue */
			m->compute_current_logic_xcc_id = 0;
			m->compute_tg_chunk_size = 0;
			m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
		}

		if (xcc == 0) {
			/* Set the MQD pointer and gart address to XCC0 MQD */
			*mqd = m;
			*gart_addr = xcc_gart_addr;
		}
	}
}

static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q, struct mqd_update_info *minfo)
{
	struct v9_mqd *m;
	int xcc = 0;
	uint64_t size = mm->mqd_stride(mm, q);

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		m = get_mqd(mqd + size * xcc);
		update_mqd(mm, m, q, minfo);

		update_cu_mask(mm, m, minfo, xcc);

		if (q->format == KFD_QUEUE_FORMAT_AQL) {
			switch (xcc) {
			case 0:
				/* Master XCC */
				m->cp_hqd_pq_control &=
					~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
				break;
			default:
				break;
			}
			m->compute_tg_chunk_size = 1;
		} else {
			/* PM4 Queue */
			m->compute_current_logic_xcc_id = 0;
			m->compute_tg_chunk_size = 0;
			m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
		}
	}
}

static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err, inst = 0;
	void *xcc_mqd;
	struct v9_mqd *m;
	uint64_t mqd_offset;

	m = get_mqd(mqd);
	mqd_offset = m->cp_mqd_stride_size;

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + mqd_offset * inst;
		err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
						    type, timeout, pipe_id,
						    queue_id, xcc_id);
		if (err) {
			pr_debug("Destroy MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err, inst = 0;
	void *xcc_mqd;
	uint64_t mqd_stride_size = mm->mqd_stride(mm, p);

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + mqd_stride_size * inst;
		err = mm->dev->kfd2kgd->hqd_load(
			mm->dev->adev, xcc_mqd, pipe_id, queue_id,
			(uint32_t __user *)p->write_ptr, wptr_shift, 0, mms,
			xcc_id);
		if (err) {
			pr_debug("Load MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd,
				 struct queue_properties *q,
				 void __user *ctl_stack,
				 u32 *ctl_stack_used_size,
				 u32 *save_area_used_size)
{
	int xcc, err = 0;
	void *xcc_mqd;
	void __user *xcc_ctl_stack;
	uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
	u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0;

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		xcc_mqd = mqd + mqd_stride_size * xcc;
		xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack +
					q->ctx_save_restore_area_size * xcc);

		err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
				     &tmp_ctl_stack_used_size,
				     &tmp_save_area_used_size);
		if (err)
			break;

		/*
		 * Report the ctl_stack_used_size and save_area_used_size of
		 * XCC 0 to user space. For multi-XCC, user space has to look
		 * at the header of each control stack area to determine the
		 * control stack size and save area used per XCC.
		 */
		if (xcc == 0) {
			*ctl_stack_used_size = tmp_ctl_stack_used_size;
			*save_area_used_size = tmp_save_area_used_size;
		}
	}

	return err;
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v9_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v9_sdma_mqd), false);
	return 0;
}

#endif

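/*
 * Top-level constructor: allocate an mqd_manager for the requested queue
 * type (CP, HIQ, DIQ or SDMA) and fill in its function pointers, selecting
 * the multi-XCC variants on GC 9.4.3/9.4.4.
 */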
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_node *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		mqd->allocate_mqd = allocate_mqd;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->get_checkpoint_info = get_checkpoint_info;
		mqd->checkpoint_mqd = checkpoint_mqd;
		mqd->restore_mqd = restore_mqd;
		mqd->mqd_size = sizeof(struct v9_mqd);
		mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
		    KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) {
			mqd->init_mqd = init_mqd_v9_4_3;
			mqd->load_mqd = load_mqd_v9_4_3;
			mqd->update_mqd = update_mqd_v9_4_3;
			mqd->destroy_mqd = destroy_mqd_v9_4_3;
			mqd->get_wave_state = get_wave_state_v9_4_3;
		} else {
			mqd->init_mqd = init_mqd;
			mqd->load_mqd = load_mqd;
			mqd->update_mqd = update_mqd;
			mqd->destroy_mqd = kfd_destroy_mqd_cp;
			mqd->get_wave_state = get_wave_state;
		}
		break;
	case KFD_MQD_TYPE_HIQ:
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->update_mqd = update_mqd;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v9_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
		    KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) {
			mqd->init_mqd = init_mqd_hiq_v9_4_3;
			mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
			mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
			mqd->check_preemption_failed = check_preemption_failed_v9_4_3;
		} else {
			mqd->init_mqd = init_mqd_hiq;
			mqd->load_mqd = kfd_hiq_load_mqd_kiq;
			mqd->destroy_mqd = destroy_hiq_mqd;
			mqd->check_preemption_failed = check_preemption_failed;
		}
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		mqd->allocate_mqd = allocate_sdma_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
		mqd->is_occupied = kfd_is_occupied_sdma;
		mqd->checkpoint_mqd = checkpoint_mqd_sdma;
		mqd->restore_mqd = restore_mqd_sdma;
		mqd->mqd_size = sizeof(struct v9_sdma_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}