// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v12_structs.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "amdgpu_amdkfd.h"

static inline struct v12_compute_mqd *get_mqd(void *mqd)
{
	return (struct v12_compute_mqd *)mqd;
}

static inline struct v12_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v12_sdma_mqd *)mqd;
}

static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			   struct mqd_update_info *minfo)
{
	struct v12_compute_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};

	if (!minfo || !minfo->cu_mask.ptr)
		return;

	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);

	m = get_mqd(mqd);
	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	m->compute_static_thread_mgmt_se4 = se_mask[4];
	m->compute_static_thread_mgmt_se5 = se_mask[5];
	m->compute_static_thread_mgmt_se6 = se_mask[6];
	m->compute_static_thread_mgmt_se7 = se_mask[7];

	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3,
		m->compute_static_thread_mgmt_se4,
		m->compute_static_thread_mgmt_se5,
		m->compute_static_thread_mgmt_se6,
		m->compute_static_thread_mgmt_se7);
}

static void set_priority(struct v12_compute_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}

static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
					struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj;

	/*
	 * Allocate one full page for the MQD, because MES writes to areas
	 * beyond the size of the MQD struct.
	 */
	if (kfd_gtt_sa_allocate(node, PAGE_SIZE, &mqd_mem_obj))
		return NULL;

	return mqd_mem_obj;
}

static void init_mqd(struct mqd_manager *mm, void **mqd,
		     struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		     struct queue_properties *q)
{
	uint64_t addr;
	struct v12_compute_mqd *m;

	m = (struct v12_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memset(m, 0, PAGE_SIZE);

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	/* Set cp_hqd_hq_status0.c_queue_debug_en to 1 to have the CP set up
	 * the DISPATCH_PTR. This is required for the kfd debugger.
	 */
	m->cp_hqd_hq_status0 = 1 << 14;

	/* Advertise PCIe atomics support to the firmware (bit 29) when the
	 * device supports them.
	 */
	if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
		m->cp_hqd_hq_status0 |= 1 << 29;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
	}

	if (mm->dev->kfd->cwsr_enabled) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	mm->update_mqd(mm, m, q, NULL);
}

static int load_mqd(struct mqd_manager *mm, void *mqd,
		    uint32_t pipe_id, uint32_t queue_id,
		    struct queue_properties *p, struct mm_struct *mms)
{
	int r = 0;
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
				       (uint32_t __user *)p->write_ptr,
				       wptr_shift, 0, mms, 0);
	return r;
}
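
/*
 * Illustration of the wptr units (sketch only, values invented): a PM4
 * queue wptr of 32 means 32 dwords (128 bytes) have been written, while
 * an AQL wptr of 32 means 32 packets, i.e. 32 << 4 = 512 dwords, since
 * each 64-byte AQL packet is 16 dwords. The wptr_shift above tells
 * hqd_load() how to scale between the two units.
 */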

static void update_mqd(struct mqd_manager *mm, void *mqd,
		       struct queue_properties *q,
		       struct mqd_update_info *minfo)
{
	struct v12_compute_mqd *m;

	m = get_mqd(mqd);

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	/* The ring-size field encodes log2(ring size in dwords) - 1;
	 * ffs() is 1-based, hence the double decrement.
	 */
	m->cp_hqd_pq_control |=
			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1) so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 */
	m->cp_hqd_eop_control = min(0xA,
		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		/* WPP_CLAMP was removed from PQ Control in GC 10 */
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |=
			1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->kfd->cwsr_enabled)
		m->cp_hqd_ctx_save_control = 0;

	update_cu_mask(mm, mqd, minfo);
	set_priority(m, q);

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
	struct v12_compute_mqd *m = (struct v12_compute_mqd *)mqd;

	return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}

static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  struct queue_properties *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v12_compute_mqd *m;
	struct mqd_user_context_save_area_header header;

	m = get_mqd(mqd);

	/* The control stack is written backwards, while the workgroup
	 * context data is written forwards. Both start at
	 * m->cp_hqd_cntl_stack_size. The current positions are
	 * m->cp_hqd_cntl_stack_offset and m->cp_hqd_wg_state_offset,
	 * respectively.
	 */
	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	/* The control stack is not copied to user mode for GFXv12 because
	 * it is part of the context save area, which is already accessible
	 * to user mode.
	 */
	header.control_stack_size = *ctl_stack_used_size;
	header.wave_state_size = *save_area_used_size;

	header.wave_state_offset = m->cp_hqd_wg_state_offset;
	header.control_stack_offset = m->cp_hqd_cntl_stack_offset;

	if (copy_to_user(ctl_stack, &header, sizeof(header)))
		return -EFAULT;

	return 0;
}
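
/*
 * Resulting save-area layout as implied by the offsets above (a sketch
 * derived from get_wave_state(), not an authoritative map):
 *
 *	0 .. cntl_stack_offset                  header + unused control stack
 *	cntl_stack_offset .. cntl_stack_size    used control stack
 *	cntl_stack_size .. wg_state_offset      used workgroup/wave state
 *
 * The control stack grows downwards from cntl_stack_size while the
 * workgroup state grows upwards from the same boundary.
 */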

static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			 struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			 struct queue_properties *q)
{
	struct v12_compute_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
			  struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			  struct queue_properties *q)
{
	struct v12_sdma_mqd *m;

	m = (struct v12_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	memset(m, 0, sizeof(struct v12_sdma_mqd));

	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
			    struct queue_properties *q,
			    struct mqd_update_info *minfo)
{
	struct v12_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
		<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__MCU_WPTR_POLL_ENABLE__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
		<< SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
		& SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;

	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}
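
/*
 * Worked example for the RB_SIZE encoding above (illustrative values):
 * a 4 KiB ring is 1024 dwords and ffs(1024) - 1 = 10, so the SDMA
 * RB_SIZE field holds log2(ring size in dwords). Note the contrast
 * with update_mqd(), where the CP ring-size field subtracts one more
 * and stores log2(dwords) - 1.
 */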

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v12_compute_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v12_sdma_mqd), false);
	return 0;
}

#endif

struct mqd_manager *mqd_manager_init_v12(enum KFD_MQD_TYPE type,
					 struct kfd_node *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v12_compute_mqd);
		mqd->get_wave_state = get_wave_state;
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_HIQ:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_hiq_load_mqd_kiq;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v12_compute_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		mqd->check_preemption_failed = check_preemption_failed;
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v12_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = kfd_load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
		mqd->is_occupied = kfd_is_occupied_sdma;
		mqd->mqd_size = sizeof(struct v12_sdma_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}
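
/*
 * Typical use of the returned ops table (a sketch only, simplified from
 * the device queue manager; error handling is omitted and the queue
 * fields used are assumptions based on kfd_priv.h):
 *
 *	struct mqd_manager *mgr = mqd_manager_init_v12(KFD_MQD_TYPE_CP, dev);
 *
 *	mem_obj = mgr->allocate_mqd(dev, &q->properties);
 *	mgr->init_mqd(mgr, &q->mqd, mem_obj, &q->gart_mqd_addr,
 *		      &q->properties);
 *	mgr->load_mqd(mgr, q->mqd, pipe, queue, &q->properties,
 *		      current->mm);
 */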