// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"
#include "mes_v11_api_def.h"
#include "kfd_debug.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
				  u32 pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period,
				bool reset);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q, const uint32_t *restore_sdma_id);
static void kfd_process_hw_exception(struct work_struct *work);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec
		+ pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->kfd->shared_resources.cp_queue_bitmap))
			return true;
	return false;
}

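/*
 * The helpers below expose the CP and SDMA queue topology that amdgpu
 * reports to KFD via shared_resources (queue bitmaps, pipes per MEC,
 * queues per pipe and per SDMA engine).
 */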
unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
				AMDGPU_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->kfd->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->kfd->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
	return kfd_get_num_sdma_engines(dqm->dev) +
		kfd_get_num_xgmi_sdma_engines(dqm->dev);
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return kfd_get_num_sdma_engines(dqm->dev) *
		dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
	return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
		dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}

static void init_sdma_bitmaps(struct device_queue_manager *dqm)
{
	bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm));

	bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));

	/* Mask out the reserved queues */
	bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap,
		      dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap,
		      KFD_MAX_SDMA_QUEUES);
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id;

	for_each_inst(xcc_id, xcc_mask)
		dqm->dev->kfd2kgd->program_sh_mem_settings(
			dqm->dev->adev, qpd->vmid, qpd->sh_mem_config,
			qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit,
			qpd->sh_mem_bases, xcc_id);
}

static void kfd_hws_hang(struct device_queue_manager *dqm)
{
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;

	/* Mark all device queues as reset. */
	list_for_each_entry(cur, &dqm->queues, list) {
		qpd = cur->qpd;
		list_for_each_entry(q, &qpd->queues_list, list) {
			struct kfd_process_device *pdd = qpd_to_pdd(qpd);

			pdd->has_reset_queue = true;
		}
	}

	/*
	 * Issue a GPU reset if HWS is unresponsive
	 */
	schedule_work(&dqm->hw_exception_work);
}

static int convert_to_mes_queue_type(int queue_type)
{
	int mes_queue_type;

	switch (queue_type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		mes_queue_type = MES_QUEUE_TYPE_COMPUTE;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		mes_queue_type = MES_QUEUE_TYPE_SDMA;
		break;
	default:
		WARN(1, "Invalid queue type %d", queue_type);
		mes_queue_type = -EINVAL;
		break;
	}

	return mes_queue_type;
}

static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
			 struct qcm_process_device *qpd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
	struct mes_add_queue_input queue_input;
	int r, queue_type;
	uint64_t wptr_addr_off;

	if (!dqm->sched_running || dqm->sched_halt)
		return 0;
	if (!down_read_trylock(&adev->reset_domain->sem))
		return -EIO;

	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
	queue_input.process_id = qpd->pqm->process->pasid;
	queue_input.page_table_base_addr = qpd->page_table_base;
	queue_input.process_va_start = 0;
	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
	/* MES unit for quantum is 100ns */
	queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM; /* Equivalent to 10ms. */
	queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;
	queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */
	queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = q->properties.priority;
	queue_input.gang_global_priority_level =
					AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	queue_input.doorbell_offset = q->properties.doorbell_off;
	queue_input.mqd_addr = q->gart_mqd_addr;
	queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;

	wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
	queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->properties.wptr_bo) + wptr_addr_off;

	queue_input.is_kfd_process = 1;
	queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
	queue_input.queue_size = q->properties.queue_size >> 2;

	queue_input.paging = false;
	queue_input.tba_addr = qpd->tba_addr;
	queue_input.tma_addr = qpd->tma_addr;
	queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
	queue_input.skip_process_ctx_clear =
		qpd->pqm->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED &&
						(qpd->pqm->process->debug_trap_enabled ||
						 kfd_dbg_has_ttmps_always_setup(q->device));

	queue_type = convert_to_mes_queue_type(q->properties.type);
	if (queue_type < 0) {
		dev_err(adev->dev, "Queue type not supported with MES, queue:%d\n",
			q->properties.type);
		up_read(&adev->reset_domain->sem);
		return -EINVAL;
	}
	queue_input.queue_type = (uint32_t)queue_type;

	queue_input.exclusively_scheduled = q->properties.is_gws;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	up_read(&adev->reset_domain->sem);
	if (r) {
		dev_err(adev->dev, "failed to add hardware queue to MES, doorbell=0x%x\n",
			q->properties.doorbell_off);
		dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	int r;
	struct mes_remove_queue_input queue_input;

	if (!dqm->sched_running || dqm->sched_halt)
		return 0;
	if (!down_read_trylock(&adev->reset_domain->sem))
		return -EIO;

	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
	queue_input.doorbell_offset = q->properties.doorbell_off;
	queue_input.gang_context_addr = q->gang_ctx_gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	up_read(&adev->reset_domain->sem);

	if (r) {
		dev_err(adev->dev, "failed to remove hardware queue from MES, doorbell=0x%x\n",
			q->properties.doorbell_off);
		dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

static int remove_all_kfd_queues_mes(struct device_queue_manager *dqm)
{
	struct device_process_node *cur;
	struct device *dev = dqm->dev->adev->dev;
	struct qcm_process_device *qpd;
	struct queue *q;
	int retval = 0;

	list_for_each_entry(cur, &dqm->queues, list) {
		qpd = cur->qpd;
		list_for_each_entry(q, &qpd->queues_list, list) {
			if (q->properties.is_active) {
				retval = remove_queue_mes(dqm, q, qpd);
				if (retval) {
					dev_err(dev, "%s: Failed to remove queue %d for dev %d",
						__func__,
						q->properties.queue_id,
						dqm->dev->id);
					return retval;
				}
			}
		}
	}

	return retval;
}

static int add_all_kfd_queues_mes(struct device_queue_manager *dqm)
{
	struct device_process_node *cur;
	struct device *dev = dqm->dev->adev->dev;
	struct qcm_process_device *qpd;
	struct queue *q;
	int retval = 0;

	list_for_each_entry(cur, &dqm->queues, list) {
		qpd = cur->qpd;
		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;
			retval = add_queue_mes(dqm, q, qpd);
			if (retval) {
				dev_err(dev, "%s: Failed to add queue %d for dev %d",
					__func__,
					q->properties.queue_id,
					dqm->dev->id);
				return retval;
			}
		}
	}

	return retval;
}

static int suspend_all_queues_mes(struct device_queue_manager *dqm)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	int r = 0;

	if (!down_read_trylock(&adev->reset_domain->sem))
		return -EIO;

	r = amdgpu_mes_suspend(adev);
	up_read(&adev->reset_domain->sem);

	if (r) {
		dev_err(adev->dev, "failed to suspend gangs from MES\n");
		dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

static int resume_all_queues_mes(struct device_queue_manager *dqm)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	int r = 0;

	if (!down_read_trylock(&adev->reset_domain->sem))
		return -EIO;

	r = amdgpu_mes_resume(adev);
	up_read(&adev->reset_domain->sem);

	if (r) {
		dev_err(adev->dev, "failed to resume gangs from MES\n");
		dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

static void increment_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count++;
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count++;

	if (q->properties.is_gws) {
		dqm->gws_queue_count++;
		qpd->mapped_gws_queue = true;
	}
}

static void decrement_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count--;
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count--;

	if (q->properties.is_gws) {
		dqm->gws_queue_count--;
		qpd->mapped_gws_queue = false;
	}
}

/*
 * Allocate a doorbell ID to this queue.
 * If doorbell_id is passed in, make sure requested ID is valid then allocate it.
 */
static int allocate_doorbell(struct qcm_process_device *qpd,
			     struct queue *q,
			     uint32_t const *restore_id)
{
	struct kfd_node *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */

		if (restore_id && *restore_id != q->properties.queue_id)
			return -EINVAL;

		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		/* For SDMA queues on SOC15 with 8-byte doorbell, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for a SDMA engine is 512.
		 */

		uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;

		/*
		 * q->properties.sdma_engine_id corresponds to the virtual
		 * sdma engine number. However, for doorbell allocation,
		 * we need the physical sdma engine id in order to get the
		 * correct doorbell offset.
		 */
		uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
					       get_num_all_sdma_engines(qpd->dqm) +
					       q->properties.sdma_engine_id]
						+ (q->properties.sdma_queue_id & 1)
						* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
						+ (q->properties.sdma_queue_id >> 1);

		if (restore_id && *restore_id != valid_id)
			return -EINVAL;
		q->doorbell_id = valid_id;
	} else {
		/* For CP queues on SOC15 */
		if (restore_id) {
			/* make sure that ID is free */
			if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
				return -EINVAL;

			q->doorbell_id = *restore_id;
		} else {
			/* or reserve a free doorbell ID */
			unsigned int found;

			found = find_first_zero_bit(qpd->doorbell_bitmap,
						    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
			if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
				pr_debug("No doorbells available");
				return -EBUSY;
			}
			set_bit(found, qpd->doorbell_bitmap);
			q->doorbell_id = found;
		}
	}

	q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev,
								  qpd->proc_doorbells,
								  q->doorbell_id,
								  dev->kfd->device_info.doorbell_size);
	return 0;
}

static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_node *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}

static void program_trap_handler_settings(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id;

	if (dqm->dev->kfd2kgd->program_trap_handler_settings)
		for_each_inst(xcc_id, xcc_mask)
			dqm->dev->kfd2kgd->program_trap_handler_settings(
				dqm->dev->adev, qpd->vmid, qpd->tba_addr,
				qpd->tma_addr, xcc_id);
}

static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	struct device *dev = dqm->dev->adev->dev;
	int allocated_vmid = -1, i;

	for (i = dqm->dev->vm_info.first_vmid_kfd;
			i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
		if (!dqm->vmid_pasid[i]) {
			allocated_vmid = i;
			break;
		}
	}

	if (allocated_vmid < 0) {
		dev_err(dev, "no more vmid to allocate\n");
		return -ENOSPC;
	}

	pr_debug("vmid allocated: %d\n", allocated_vmid);

	dqm->vmid_pasid[allocated_vmid] = q->process->pasid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);

	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	program_sh_mem_settings(dqm, qpd);

	if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled)
		program_trap_handler_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);

	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
				qpd->sh_hidden_private_base, qpd->vmid);

	return 0;
}

static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	struct device *dev = dqm->dev->adev->dev;

	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->adev->asic_type == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			dev_err(dev, "Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
	dqm->vmid_pasid[qpd->vmid] = 0;

	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				const struct kfd_criu_queue_priv_data *qd,
				const void *restore_mqd, const void *restore_ctl_stack)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		retval = allocate_hqd(dqm, q);
		if (retval)
			goto deallocate_vmid;
		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
		if (retval)
			goto deallocate_vmid;
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	}

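	/* Reserve a doorbell for this queue; on CRIU restore (qd != NULL) the
	 * previously saved doorbell index is re-validated and reused.
	 */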
	retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
	if (retval)
		goto out_deallocate_hqd;

	/* Temporarily release dqm lock to avoid a circular lock dependency */
	dqm_unlock(dqm);
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	dqm_lock(dqm);

	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	if (qd)
		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
				     &q->properties, restore_mqd, restore_ctl_stack,
				     qd->ctl_stack_size);
	else
		mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
					&q->gart_mqd_addr, &q->properties);

	if (q->properties.is_active) {
		if (!dqm->sched_running) {
			WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
			goto add_queue_to_list;
		}

		if (WARN(q->process->mm != current->mm,
					"should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
					q->queue, &q->properties, current->mm);
		if (retval)
			goto out_free_mqd;
	}

add_queue_to_list:
	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		increment_queue_count(dqm, qpd, q);

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	goto out_unlock;

out_free_mqd:
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
deallocate_vmid:
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
out_unlock:
	dqm_unlock(dqm);
	return retval;
}

static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

#define SQ_IND_CMD_CMD_KILL		0x00000003
#define SQ_IND_CMD_MODE_BROADCAST	0x00000001

static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
{
	int status = 0;
	unsigned int vmid;
	uint16_t queried_pasid;
	union SQ_CMD_BITS reg_sq_cmd;
	union GRBM_GFX_INDEX_BITS reg_gfx_index;
	struct kfd_process_device *pdd;
	int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
	int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
	uint32_t xcc_mask = dev->xcc_mask;
	int xcc_id;

	reg_sq_cmd.u32All = 0;
	reg_gfx_index.u32All = 0;

	pr_debug("Killing all process wavefronts\n");

	if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
		dev_err(dev->adev->dev, "no vmid pasid mapping supported\n");
		return -EOPNOTSUPP;
	}

	/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
	 * ATC_VMID15_PASID_MAPPING
	 * to check which VMID the current process is mapped to.
	 */

	for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
		status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
				(dev->adev, vmid, &queried_pasid);

		if (status && queried_pasid == p->pasid) {
			pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
					vmid, p->pasid);
			break;
		}
	}

	if (vmid > last_vmid_to_scan) {
		dev_err(dev->adev->dev, "Didn't find vmid for pasid 0x%x\n", p->pasid);
		return -EFAULT;
	}

	/* taking the VMID for that process on the safe way using PDD */
	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd)
		return -EFAULT;

	reg_gfx_index.bits.sh_broadcast_writes = 1;
	reg_gfx_index.bits.se_broadcast_writes = 1;
	reg_gfx_index.bits.instance_broadcast_writes = 1;
	reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
	reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
	reg_sq_cmd.bits.vm_id = vmid;

	for_each_inst(xcc_id, xcc_mask)
		dev->kfd2kgd->wave_control_execute(
			dev->adev, reg_gfx_index.u32All,
			reg_sq_cmd.u32All, xcc_id);

	return 0;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
	else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	if (!dqm->sched_running) {
		WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
		return 0;
	}

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		decrement_queue_count(dqm, qpd, q);

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int retval;
	uint64_t sdma_val = 0;
	struct device *dev = dqm->dev->adev->dev;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
	struct mqd_manager *mqd_mgr =
		dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
							&sdma_val);
		if (retval)
			dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
				q->properties.queue_id);
	}

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	if (!retval)
		pdd->sdma_past_activity_counter += sdma_val;
	dqm_unlock(dqm);

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;
}

static int update_queue(struct device_queue_manager *dqm, struct queue *q,
			struct mqd_update_info *minfo)
{
	int retval = 0;
	struct device *dev = dqm->dev->adev->dev;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		if (!dqm->dev->kfd->shared_resources.enable_mes)
			retval = unmap_queues_cpsch(dqm,
					KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
		else if (prev_active)
			retval = remove_queue_mes(dqm, q, &pdd->qpd);

		/* queue is reset so inaccessible */
		if (pdd->has_reset_queue) {
			retval = -EACCES;
			goto out_unlock;
		}

		if (retval) {
			dev_err(dev, "unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {

		if (!dqm->sched_running) {
			WARN_ONCE(1, "Update non-HWS queue while stopped\n");
			goto out_unlock;
		}

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				(dqm->dev->kfd->cwsr_enabled ?
				 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
				 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			dev_err(dev, "destroy mqd failed\n");
			goto out_unlock;
		}
	}

	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->active_queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active) {
		increment_queue_count(dqm, &pdd->qpd, q);
	} else if (!q->properties.is_active && prev_active) {
		decrement_queue_count(dqm, &pdd->qpd, q);
	} else if (q->gws && !q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count++;
			pdd->qpd.mapped_gws_queue = true;
		}
		q->properties.is_gws = true;
	} else if (!q->gws && q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count--;
			pdd->qpd.mapped_gws_queue = false;
		}
		q->properties.is_gws = false;
	}

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		if (!dqm->dev->kfd->shared_resources.enable_mes)
			retval = map_queues_cpsch(dqm);
		else if (q->properties.is_active)
			retval = add_queue_mes(dqm, q, &pdd->qpd);
	} else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
						   q->pipe, q->queue,
						   &q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

/* suspend_single_queue does not lock the dqm like the
 * evict_process_queues_cpsch or evict_process_queues_nocpsch. You should
 * lock the dqm before calling, and unlock after calling.
 *
 * The reason we don't lock the dqm is because this function may be
 * called on multiple queues in a loop, so rather than locking/unlocking
 * multiple times, we will just keep the dqm locked for all of the calls.
 */
static int suspend_single_queue(struct device_queue_manager *dqm,
				struct kfd_process_device *pdd,
				struct queue *q)
{
	bool is_new;

	if (q->properties.is_suspended)
		return 0;

	pr_debug("Suspending PASID %u queue [%i]\n",
			pdd->process->pasid,
			q->properties.queue_id);

	is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);

	if (is_new || q->properties.is_being_destroyed) {
		pr_debug("Suspend: skip %s queue id %i\n",
				is_new ? "new" : "destroyed",
				q->properties.queue_id);
		return -EBUSY;
	}

	q->properties.is_suspended = true;
	if (q->properties.is_active) {
		if (dqm->dev->kfd->shared_resources.enable_mes) {
			int r = remove_queue_mes(dqm, q, &pdd->qpd);

			if (r)
				return r;
		}

		decrement_queue_count(dqm, &pdd->qpd, q);
		q->properties.is_active = false;
	}

	return 0;
}

/* resume_single_queue does not lock the dqm like the functions
 * restore_process_queues_cpsch or restore_process_queues_nocpsch. You should
 * lock the dqm before calling, and unlock after calling.
 *
 * The reason we don't lock the dqm is because this function may be
 * called on multiple queues in a loop, so rather than locking/unlocking
 * multiple times, we will just keep the dqm locked for all of the calls.
 */
static int resume_single_queue(struct device_queue_manager *dqm,
			       struct qcm_process_device *qpd,
			       struct queue *q)
{
	struct kfd_process_device *pdd;

	if (!q->properties.is_suspended)
		return 0;

	pdd = qpd_to_pdd(qpd);

	pr_debug("Restoring from suspend PASID %u queue [%i]\n",
			    pdd->process->pasid,
			    q->properties.queue_id);

	q->properties.is_suspended = false;

	if (QUEUE_IS_ACTIVE(q->properties)) {
		if (dqm->dev->kfd->shared_resources.enable_mes) {
			int r = add_queue_mes(dqm, q, &pdd->qpd);

			if (r)
				return r;
		}

		q->properties.is_active = true;
		increment_queue_count(dqm, qpd, q);
	}

	return 0;
}

static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval, ret = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	pdd->last_evict_timestamp = get_jiffies_64();
	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = false;
		decrement_queue_count(dqm, qpd, q);

		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
			continue;

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				(dqm->dev->kfd->cwsr_enabled ?
				 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
				 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}

out:
	dqm_unlock(dqm);
	return ret;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct device *dev = dqm->dev->adev->dev;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);

	/* The debugger creates processes that temporarily have not acquired
	 * all VMs for all devices and has no VMs itself.
	 * Skip queue eviction on process eviction.
	 */
	if (!pdd->drm_priv)
		goto out;

	pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		q->properties.is_active = false;
		decrement_queue_count(dqm, qpd, q);

		if (dqm->dev->kfd->shared_resources.enable_mes) {
			retval = remove_queue_mes(dqm, q, qpd);
			if (retval) {
				dev_err(dev, "Failed to evict queue %d\n",
					q->properties.queue_id);
				goto out;
			}
		}
	}
	pdd->last_evict_timestamp = get_jiffies_64();
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		retval = execute_queues_cpsch(dqm,
					      qpd->is_debug ?
					      KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
					      USE_DEFAULT_GRACE_PERIOD);

out:
	dqm_unlock(dqm);
	return retval;
}

static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	uint64_t eviction_duration;
	int retval, ret = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->adev,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
	}

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm) {
		ret = -EFAULT;
		goto out;
	}

	/* Remove the eviction flags. Activate queues that are not
	 * inactive for other reasons.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = true;
		increment_queue_count(dqm, qpd, q);

		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
			continue;

		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
				       q->queue, &q->properties, mm);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}
	qpd->evicted = 0;
	eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
	atomic64_add(eviction_duration, &pdd->evict_duration_counter);
out:
	if (mm)
		mmput(mm);
	dqm_unlock(dqm);
	return ret;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct device *dev = dqm->dev->adev->dev;
	struct kfd_process_device *pdd;
	uint64_t eviction_duration;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	/* The debugger creates processes that temporarily have not acquired
	 * all VMs for all devices and has no VMs itself.
	 * Skip queue restore on process restore.
	 */
	if (!pdd->drm_priv)
		goto vm_not_acquired;

	pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
	pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		q->properties.is_active = true;
		increment_queue_count(dqm, &pdd->qpd, q);

		if (dqm->dev->kfd->shared_resources.enable_mes) {
			retval = add_queue_mes(dqm, q, qpd);
			if (retval) {
				dev_err(dev, "Failed to restore queue %d\n",
					q->properties.queue_id);
				goto out;
			}
		}
	}
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		retval = execute_queues_cpsch(dqm,
					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
	eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
	atomic64_add(eviction_duration, &pdd->evict_duration_counter);
vm_not_acquired:
	qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}

static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	kfd_inc_compute_active(dqm->dev);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (!retval)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
			unsigned int vmid)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id, ret;

	for_each_inst(xcc_id, xcc_mask) {
		ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
			dqm->dev->adev, pasid, vmid, xcc_id);
		if (ret)
			break;
	}

	return ret;
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	unsigned int i, xcc_id;

	for_each_inst(xcc_id, xcc_mask) {
		for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) {
			if (is_pipe_enabled(dqm, 0, i)) {
				dqm->dev->kfd2kgd->init_interrupts(
					dqm->dev->adev, i, xcc_id);
			}
		}
	}
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->kfd->shared_resources.cp_queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));

	init_sdma_bitmaps(dqm);

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	int r = 0;

	pr_info("SW scheduler is used");
	init_interrupts(dqm);

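	/* On GFX v7 (Hawaii) the packet manager is still needed without HWS:
	 * flush_texture_cache_nocpsch() uses it to build the release_mem
	 * packet submitted when a VMID is torn down.
	 */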
	if (dqm->dev->adev->asic_type == CHIP_HAWAII)
		r = pm_init(&dqm->packet_mgr, dqm);
	if (!r)
		dqm->sched_running = true;

	return r;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	if (!dqm->sched_running) {
		dqm_unlock(dqm);
		return 0;
	}

	if (dqm->dev->adev->asic_type == CHIP_HAWAII)
		pm_uninit(&dqm->packet_mgr);
	dqm->sched_running = false;
	dqm_unlock(dqm);

	return 0;
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q, const uint32_t *restore_sdma_id)
{
	struct device *dev = dqm->dev->adev->dev;
	int bit;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
			dev_err(dev, "No more SDMA queue to allocate\n");
			return -ENOMEM;
		}

		if (restore_sdma_id) {
			/* Re-use existing sdma_id */
			if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
				dev_err(dev, "SDMA queue already in use\n");
				return -EBUSY;
			}
			clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
			q->sdma_id = *restore_sdma_id;
		} else {
			/* Find first available sdma_id */
			bit = find_first_bit(dqm->sdma_bitmap,
					     get_num_sdma_queues(dqm));
			clear_bit(bit, dqm->sdma_bitmap);
			q->sdma_id = bit;
		}

		q->properties.sdma_engine_id =
			q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
		q->properties.sdma_queue_id = q->sdma_id /
				kfd_get_num_sdma_engines(dqm->dev);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
			dev_err(dev, "No more XGMI SDMA queue to allocate\n");
			return -ENOMEM;
		}
		if (restore_sdma_id) {
			/* Re-use existing sdma_id */
			if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
				dev_err(dev, "SDMA queue already in use\n");
				return -EBUSY;
			}
			clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
			q->sdma_id = *restore_sdma_id;
		} else {
			bit = find_first_bit(dqm->xgmi_sdma_bitmap,
					     get_num_xgmi_sdma_queues(dqm));
			clear_bit(bit, dqm->xgmi_sdma_bitmap);
			q->sdma_id = bit;
		}
		/* sdma_engine_id is sdma id including
		 * both PCIe-optimized SDMAs and XGMI-
		 * optimized SDMAs. The calculation below
		 * assumes the first N engines are always
		 * PCIe-optimized ones
		 */
		q->properties.sdma_engine_id =
			kfd_get_num_sdma_engines(dqm->dev) +
			q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
		q->properties.sdma_queue_id = q->sdma_id /
			kfd_get_num_xgmi_sdma_engines(dqm->dev);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
		int i, num_queues, num_engines, eng_offset = 0, start_engine;
		bool free_bit_found = false, is_xgmi = false;

		if (q->properties.sdma_engine_id < kfd_get_num_sdma_engines(dqm->dev)) {
			num_queues = get_num_sdma_queues(dqm);
			num_engines = kfd_get_num_sdma_engines(dqm->dev);
			q->properties.type = KFD_QUEUE_TYPE_SDMA;
		} else {
			num_queues = get_num_xgmi_sdma_queues(dqm);
			num_engines = kfd_get_num_xgmi_sdma_engines(dqm->dev);
			eng_offset = kfd_get_num_sdma_engines(dqm->dev);
			q->properties.type = KFD_QUEUE_TYPE_SDMA_XGMI;
			is_xgmi = true;
		}

		/* Scan available bit based on target engine ID. */
		start_engine = q->properties.sdma_engine_id - eng_offset;
		for (i = start_engine; i < num_queues; i += num_engines) {

			if (!test_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap))
				continue;

			clear_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap);
			q->sdma_id = i;
			q->properties.sdma_queue_id = q->sdma_id / num_engines;
			free_bit_found = true;
			break;
		}

		if (!free_bit_found) {
			dev_err(dev, "No more SDMA queue to allocate for target ID %i\n",
				q->properties.sdma_engine_id);
			return -ENOMEM;
		}
	}

	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (q->sdma_id >= get_num_sdma_queues(dqm))
			return;
		set_bit(q->sdma_id, dqm->sdma_bitmap);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
			return;
		set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
	}
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;
	struct device *dev = dqm->dev->adev->dev;

	res.vmid_mask = dqm->dev->compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
			/ dqm->dev->kfd->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			dev_err(dev, "Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= 1ull
			<< amdgpu_queue_mask_bit_to_set_resource_bit(
				dqm->dev->adev, i);
	}
	res.gws_mask = ~0ull;
	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
		"vmid mask: 0x%8X\n"
		"queue mask: 0x%8llX\n",
		res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packet_mgr, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->processes_count = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;
	dqm->active_runlist = false;
	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
	dqm->trap_debug_vmid = 0;

	init_sdma_bitmaps(dqm);

	if (dqm->dev->kfd2kgd->get_iq_wait_times)
		dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
					&dqm->wait_times,
					ffs(dqm->dev->xcc_mask) - 1);
	return 0;
}

/* halt_cpsch:
 * Unmap queues so the scheduler doesn't continue remaining jobs in the queue.
 * Then set dqm->sched_halt so queues don't map to runlist until unhalt_cpsch
 * is called.
 */
static int halt_cpsch(struct device_queue_manager *dqm)
{
	int ret = 0;

	dqm_lock(dqm);
	if (!dqm->sched_running) {
		dqm_unlock(dqm);
		return 0;
	}

	WARN_ONCE(dqm->sched_halt, "Scheduling is already on halt\n");

	if (!dqm->is_hws_hang) {
		if (!dqm->dev->kfd->shared_resources.enable_mes)
			ret = unmap_queues_cpsch(dqm,
						 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
						 USE_DEFAULT_GRACE_PERIOD, false);
		else
			ret = remove_all_kfd_queues_mes(dqm);
	}
	dqm->sched_halt = true;
	dqm_unlock(dqm);

	return ret;
}

/* unhalt_cpsch
 * Unset dqm->sched_halt and map queues back to runlist
 */
static int unhalt_cpsch(struct device_queue_manager *dqm)
{
	int ret = 0;

	dqm_lock(dqm);
	if (!dqm->sched_running || !dqm->sched_halt) {
		WARN_ONCE(!dqm->sched_halt, "Scheduling is not on halt.\n");
		dqm_unlock(dqm);
		return 0;
	}
	dqm->sched_halt = false;
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		ret = execute_queues_cpsch(dqm,
					   KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
					   0, USE_DEFAULT_GRACE_PERIOD);
	else
		ret = add_all_kfd_queues_mes(dqm);

	dqm_unlock(dqm);

	return ret;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device *dev = dqm->dev->adev->dev;
	int retval, num_hw_queue_slots;

	retval = 0;

	dqm_lock(dqm);

	if (!dqm->dev->kfd->shared_resources.enable_mes) {
		retval = pm_init(&dqm->packet_mgr, dqm);
		if (retval)
			goto fail_packet_manager_init;

		retval = set_sched_resources(dqm);
		if (retval)
			goto fail_set_sched_resources;
	}
	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
				&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	/* clear hang status when driver tries to start the hw scheduler */
	dqm->sched_running = true;

	if (!dqm->dev->kfd->shared_resources.enable_mes)
		execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);

	/* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
	if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
	    (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
		uint32_t reg_offset = 0;
		uint32_t grace_period = 1;

		retval = pm_update_grace_period(&dqm->packet_mgr,
						grace_period);
		if (retval)
			dev_err(dev, "Setting grace timeout failed\n");
		else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
			/* Update dqm->wait_times maintained in software */
			dqm->dev->kfd2kgd->build_grace_period_packet_info(
					dqm->dev->adev, dqm->wait_times,
					grace_period, &reg_offset,
					&dqm->wait_times);
	}

	/* setup per-queue reset detection buffer */
	num_hw_queue_slots = dqm->dev->kfd->shared_resources.num_queue_per_pipe *
			     dqm->dev->kfd->shared_resources.num_pipe_per_mec *
			     NUM_XCC(dqm->dev->xcc_mask);

	dqm->detect_hang_info_size = num_hw_queue_slots * sizeof(struct dqm_detect_hang_info);
kzalloc(dqm->detect_hang_info_size, GFP_KERNEL); 1873 1874 if (!dqm->detect_hang_info) { 1875 retval = -ENOMEM; 1876 goto fail_detect_hang_buffer; 1877 } 1878 1879 dqm_unlock(dqm); 1880 1881 return 0; 1882 fail_detect_hang_buffer: 1883 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); 1884 fail_allocate_vidmem: 1885 fail_set_sched_resources: 1886 if (!dqm->dev->kfd->shared_resources.enable_mes) 1887 pm_uninit(&dqm->packet_mgr); 1888 fail_packet_manager_init: 1889 dqm_unlock(dqm); 1890 return retval; 1891 } 1892 1893 static int stop_cpsch(struct device_queue_manager *dqm) 1894 { 1895 dqm_lock(dqm); 1896 if (!dqm->sched_running) { 1897 dqm_unlock(dqm); 1898 return 0; 1899 } 1900 1901 if (!dqm->dev->kfd->shared_resources.enable_mes) 1902 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); 1903 else 1904 remove_all_kfd_queues_mes(dqm); 1905 1906 dqm->sched_running = false; 1907 1908 if (!dqm->dev->kfd->shared_resources.enable_mes) 1909 pm_release_ib(&dqm->packet_mgr); 1910 1911 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); 1912 if (!dqm->dev->kfd->shared_resources.enable_mes) 1913 pm_uninit(&dqm->packet_mgr); 1914 kfree(dqm->detect_hang_info); 1915 dqm->detect_hang_info = NULL; 1916 dqm_unlock(dqm); 1917 1918 return 0; 1919 } 1920 1921 static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, 1922 struct kernel_queue *kq, 1923 struct qcm_process_device *qpd) 1924 { 1925 dqm_lock(dqm); 1926 if (dqm->total_queue_count >= max_num_of_queues_per_device) { 1927 pr_warn("Can't create new kernel queue because %d queues were already created\n", 1928 dqm->total_queue_count); 1929 dqm_unlock(dqm); 1930 return -EPERM; 1931 } 1932 1933 /* 1934 * Unconditionally increment this counter, regardless of the queue's 1935 * type or whether the queue is active. 1936 */ 1937 dqm->total_queue_count++; 1938 pr_debug("Total of %d queues are accountable so far\n", 1939 dqm->total_queue_count); 1940 1941 list_add(&kq->list, &qpd->priv_queue_list); 1942 increment_queue_count(dqm, qpd, kq->queue); 1943 qpd->is_debug = true; 1944 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, 1945 USE_DEFAULT_GRACE_PERIOD); 1946 dqm_unlock(dqm); 1947 1948 return 0; 1949 } 1950 1951 static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, 1952 struct kernel_queue *kq, 1953 struct qcm_process_device *qpd) 1954 { 1955 dqm_lock(dqm); 1956 list_del(&kq->list); 1957 decrement_queue_count(dqm, qpd, kq->queue); 1958 qpd->is_debug = false; 1959 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 1960 USE_DEFAULT_GRACE_PERIOD); 1961 /* 1962 * Unconditionally decrement this counter, regardless of the queue's 1963 * type. 
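* Kernel queues are counted against max_num_of_queues_per_device as well.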
1964 */ 1965 dqm->total_queue_count--; 1966 pr_debug("Total of %d queues are accountable so far\n", 1967 dqm->total_queue_count); 1968 dqm_unlock(dqm); 1969 } 1970 1971 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, 1972 struct qcm_process_device *qpd, 1973 const struct kfd_criu_queue_priv_data *qd, 1974 const void *restore_mqd, const void *restore_ctl_stack) 1975 { 1976 int retval; 1977 struct mqd_manager *mqd_mgr; 1978 1979 if (dqm->total_queue_count >= max_num_of_queues_per_device) { 1980 pr_warn("Can't create new usermode queue because %d queues were already created\n", 1981 dqm->total_queue_count); 1982 retval = -EPERM; 1983 goto out; 1984 } 1985 1986 if (q->properties.type == KFD_QUEUE_TYPE_SDMA || 1987 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI || 1988 q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) { 1989 dqm_lock(dqm); 1990 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL); 1991 dqm_unlock(dqm); 1992 if (retval) 1993 goto out; 1994 } 1995 1996 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL); 1997 if (retval) 1998 goto out_deallocate_sdma_queue; 1999 2000 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( 2001 q->properties.type)]; 2002 2003 if (q->properties.type == KFD_QUEUE_TYPE_SDMA || 2004 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) 2005 dqm->asic_ops.init_sdma_vm(dqm, q, qpd); 2006 q->properties.tba_addr = qpd->tba_addr; 2007 q->properties.tma_addr = qpd->tma_addr; 2008 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties); 2009 if (!q->mqd_mem_obj) { 2010 retval = -ENOMEM; 2011 goto out_deallocate_doorbell; 2012 } 2013 2014 dqm_lock(dqm); 2015 /* 2016 * Eviction state logic: mark all queues as evicted, even ones 2017 * not currently active. Restoring inactive queues later only 2018 * updates the is_evicted flag but is a no-op otherwise. 2019 */ 2020 q->properties.is_evicted = !!qpd->evicted; 2021 q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled && 2022 kfd_dbg_has_cwsr_workaround(q->device); 2023 2024 if (qd) 2025 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, 2026 &q->properties, restore_mqd, restore_ctl_stack, 2027 qd->ctl_stack_size); 2028 else 2029 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, 2030 &q->gart_mqd_addr, &q->properties); 2031 2032 list_add(&q->list, &qpd->queues_list); 2033 qpd->queue_count++; 2034 2035 if (q->properties.is_active) { 2036 increment_queue_count(dqm, qpd, q); 2037 2038 if (!dqm->dev->kfd->shared_resources.enable_mes) 2039 retval = execute_queues_cpsch(dqm, 2040 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); 2041 else 2042 retval = add_queue_mes(dqm, q, qpd); 2043 if (retval) 2044 goto cleanup_queue; 2045 } 2046 2047 /* 2048 * Unconditionally increment this counter, regardless of the queue's 2049 * type or whether the queue is active. 
2050 */ 2051 dqm->total_queue_count++; 2052 2053 pr_debug("Total of %d queues are accountable so far\n", 2054 dqm->total_queue_count); 2055 2056 dqm_unlock(dqm); 2057 return retval; 2058 2059 cleanup_queue: 2060 qpd->queue_count--; 2061 list_del(&q->list); 2062 if (q->properties.is_active) 2063 decrement_queue_count(dqm, qpd, q); 2064 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 2065 dqm_unlock(dqm); 2066 out_deallocate_doorbell: 2067 deallocate_doorbell(qpd, q); 2068 out_deallocate_sdma_queue: 2069 if (q->properties.type == KFD_QUEUE_TYPE_SDMA || 2070 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { 2071 dqm_lock(dqm); 2072 deallocate_sdma_queue(dqm, q); 2073 dqm_unlock(dqm); 2074 } 2075 out: 2076 return retval; 2077 } 2078 2079 int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm, 2080 uint64_t fence_value, 2081 unsigned int timeout_ms) 2082 { 2083 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies; 2084 struct device *dev = dqm->dev->adev->dev; 2085 uint64_t *fence_addr = dqm->fence_addr; 2086 2087 while (*fence_addr != fence_value) { 2088 /* Fatal err detected, this response won't come */ 2089 if (amdgpu_amdkfd_is_fed(dqm->dev->adev)) 2090 return -EIO; 2091 2092 if (time_after(jiffies, end_jiffies)) { 2093 dev_err(dev, "qcm fence wait loop timeout expired\n"); 2094 /* In HWS case, this is used to halt the driver thread 2095 * in order not to mess up CP states before doing 2096 * scandumps for FW debugging. 2097 */ 2098 while (halt_if_hws_hang) 2099 schedule(); 2100 2101 return -ETIME; 2102 } 2103 schedule(); 2104 } 2105 2106 return 0; 2107 } 2108 2109 /* dqm->lock mutex has to be locked before calling this function */ 2110 static int map_queues_cpsch(struct device_queue_manager *dqm) 2111 { 2112 struct device *dev = dqm->dev->adev->dev; 2113 int retval; 2114 2115 if (!dqm->sched_running || dqm->sched_halt) 2116 return 0; 2117 if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0) 2118 return 0; 2119 if (dqm->active_runlist) 2120 return 0; 2121 2122 retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues); 2123 pr_debug("%s sent runlist\n", __func__); 2124 if (retval) { 2125 dev_err(dev, "failed to execute runlist\n"); 2126 return retval; 2127 } 2128 dqm->active_runlist = true; 2129 2130 return retval; 2131 } 2132 2133 static void set_queue_as_reset(struct device_queue_manager *dqm, struct queue *q, 2134 struct qcm_process_device *qpd) 2135 { 2136 struct kfd_process_device *pdd = qpd_to_pdd(qpd); 2137 2138 dev_err(dqm->dev->adev->dev, "queue id 0x%0x at pasid 0x%0x is reset\n", 2139 q->properties.queue_id, q->process->pasid); 2140 2141 pdd->has_reset_queue = true; 2142 if (q->properties.is_active) { 2143 q->properties.is_active = false; 2144 decrement_queue_count(dqm, qpd, q); 2145 } 2146 } 2147 2148 static int detect_queue_hang(struct device_queue_manager *dqm) 2149 { 2150 int i; 2151 2152 /* detect should be used only in dqm locked queue reset */ 2153 if (WARN_ON(dqm->detect_hang_count > 0)) 2154 return 0; 2155 2156 memset(dqm->detect_hang_info, 0, dqm->detect_hang_info_size); 2157 2158 for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) { 2159 uint32_t mec, pipe, queue; 2160 int xcc_id; 2161 2162 mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe) 2163 / dqm->dev->kfd->shared_resources.num_pipe_per_mec; 2164 2165 if (mec || !test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap)) 2166 continue; 2167 2168 amdgpu_queue_mask_bit_to_mec_queue(dqm->dev->adev, i, &mec, &pipe, &queue); 2169 2170 for_each_inst(xcc_id, dqm->dev->xcc_mask) { 2171 
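/* record every HQD in this slot that still reports a ring buffer address; these entries become the candidates for the per-queue reset in reset_queues_on_hws_hang() */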
uint64_t queue_addr = dqm->dev->kfd2kgd->hqd_get_pq_addr( 2172 dqm->dev->adev, pipe, queue, xcc_id); 2173 struct dqm_detect_hang_info hang_info; 2174 2175 if (!queue_addr) 2176 continue; 2177 2178 hang_info.pipe_id = pipe; 2179 hang_info.queue_id = queue; 2180 hang_info.xcc_id = xcc_id; 2181 hang_info.queue_address = queue_addr; 2182 2183 dqm->detect_hang_info[dqm->detect_hang_count] = hang_info; 2184 dqm->detect_hang_count++; 2185 } 2186 } 2187 2188 return dqm->detect_hang_count; 2189 } 2190 2191 static struct queue *find_queue_by_address(struct device_queue_manager *dqm, uint64_t queue_address) 2192 { 2193 struct device_process_node *cur; 2194 struct qcm_process_device *qpd; 2195 struct queue *q; 2196 2197 list_for_each_entry(cur, &dqm->queues, list) { 2198 qpd = cur->qpd; 2199 list_for_each_entry(q, &qpd->queues_list, list) { 2200 if (queue_address == q->properties.queue_address) 2201 return q; 2202 } 2203 } 2204 2205 return NULL; 2206 } 2207 2208 /* only for compute queue */ 2209 static int reset_queues_on_hws_hang(struct device_queue_manager *dqm) 2210 { 2211 int r = 0, reset_count = 0, i; 2212 2213 if (!dqm->detect_hang_info || dqm->is_hws_hang) 2214 return -EIO; 2215 2216 /* assume dqm locked. */ 2217 if (!detect_queue_hang(dqm)) 2218 return -ENOTRECOVERABLE; 2219 2220 for (i = 0; i < dqm->detect_hang_count; i++) { 2221 struct dqm_detect_hang_info hang_info = dqm->detect_hang_info[i]; 2222 struct queue *q = find_queue_by_address(dqm, hang_info.queue_address); 2223 struct kfd_process_device *pdd; 2224 uint64_t queue_addr = 0; 2225 2226 if (!q) { 2227 r = -ENOTRECOVERABLE; 2228 goto reset_fail; 2229 } 2230 2231 pdd = kfd_get_process_device_data(dqm->dev, q->process); 2232 if (!pdd) { 2233 r = -ENOTRECOVERABLE; 2234 goto reset_fail; 2235 } 2236 2237 queue_addr = dqm->dev->kfd2kgd->hqd_reset(dqm->dev->adev, 2238 hang_info.pipe_id, hang_info.queue_id, hang_info.xcc_id, 2239 KFD_UNMAP_LATENCY_MS); 2240 2241 /* either reset failed or we reset an unexpected queue. 
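* (hqd_reset reports back the ring buffer address of the queue it acted on, so a mismatch with the address recorded during hang detection is treated as unrecoverable)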
*/ 2242 if (queue_addr != q->properties.queue_address) { 2243 r = -ENOTRECOVERABLE; 2244 goto reset_fail; 2245 } 2246 2247 set_queue_as_reset(dqm, q, &pdd->qpd); 2248 reset_count++; 2249 } 2250 2251 if (reset_count == dqm->detect_hang_count) 2252 kfd_signal_reset_event(dqm->dev); 2253 else 2254 r = -ENOTRECOVERABLE; 2255 2256 reset_fail: 2257 dqm->detect_hang_count = 0; 2258 2259 return r; 2260 } 2261 2262 /* dqm->lock mutex has to be locked before calling this function */ 2263 static int unmap_queues_cpsch(struct device_queue_manager *dqm, 2264 enum kfd_unmap_queues_filter filter, 2265 uint32_t filter_param, 2266 uint32_t grace_period, 2267 bool reset) 2268 { 2269 struct device *dev = dqm->dev->adev->dev; 2270 struct mqd_manager *mqd_mgr; 2271 int retval; 2272 2273 if (!dqm->sched_running) 2274 return 0; 2275 if (!dqm->active_runlist) 2276 return 0; 2277 if (!down_read_trylock(&dqm->dev->adev->reset_domain->sem)) 2278 return -EIO; 2279 2280 if (grace_period != USE_DEFAULT_GRACE_PERIOD) { 2281 retval = pm_update_grace_period(&dqm->packet_mgr, grace_period); 2282 if (retval) 2283 goto out; 2284 } 2285 2286 retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset); 2287 if (retval) 2288 goto out; 2289 2290 *dqm->fence_addr = KFD_FENCE_INIT; 2291 mb(); 2292 pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr, 2293 KFD_FENCE_COMPLETED); 2294 /* should be timed out */ 2295 retval = amdkfd_fence_wait_timeout(dqm, KFD_FENCE_COMPLETED, 2296 queue_preemption_timeout_ms); 2297 if (retval) { 2298 dev_err(dev, "The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n"); 2299 kfd_hws_hang(dqm); 2300 goto out; 2301 } 2302 2303 /* In the current MEC firmware implementation, if compute queue 2304 * doesn't response to the preemption request in time, HIQ will 2305 * abandon the unmap request without returning any timeout error 2306 * to driver. Instead, MEC firmware will log the doorbell of the 2307 * unresponding compute queue to HIQ.MQD.queue_doorbell_id fields. 
2308 * To make sure the queue unmap was successful, driver need to 2309 * check those fields 2310 */ 2311 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; 2312 if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) { 2313 if (reset_queues_on_hws_hang(dqm)) { 2314 while (halt_if_hws_hang) 2315 schedule(); 2316 dqm->is_hws_hang = true; 2317 kfd_hws_hang(dqm); 2318 retval = -ETIME; 2319 goto out; 2320 } 2321 } 2322 2323 /* We need to reset the grace period value for this device */ 2324 if (grace_period != USE_DEFAULT_GRACE_PERIOD) { 2325 if (pm_update_grace_period(&dqm->packet_mgr, 2326 USE_DEFAULT_GRACE_PERIOD)) 2327 dev_err(dev, "Failed to reset grace period\n"); 2328 } 2329 2330 pm_release_ib(&dqm->packet_mgr); 2331 dqm->active_runlist = false; 2332 2333 out: 2334 up_read(&dqm->dev->adev->reset_domain->sem); 2335 return retval; 2336 } 2337 2338 /* only for compute queue */ 2339 static int reset_queues_cpsch(struct device_queue_manager *dqm, uint16_t pasid) 2340 { 2341 int retval; 2342 2343 dqm_lock(dqm); 2344 2345 retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID, 2346 pasid, USE_DEFAULT_GRACE_PERIOD, true); 2347 2348 dqm_unlock(dqm); 2349 return retval; 2350 } 2351 2352 /* dqm->lock mutex has to be locked before calling this function */ 2353 static int execute_queues_cpsch(struct device_queue_manager *dqm, 2354 enum kfd_unmap_queues_filter filter, 2355 uint32_t filter_param, 2356 uint32_t grace_period) 2357 { 2358 int retval; 2359 2360 if (!down_read_trylock(&dqm->dev->adev->reset_domain->sem)) 2361 return -EIO; 2362 retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false); 2363 if (!retval) 2364 retval = map_queues_cpsch(dqm); 2365 up_read(&dqm->dev->adev->reset_domain->sem); 2366 return retval; 2367 } 2368 2369 static int wait_on_destroy_queue(struct device_queue_manager *dqm, 2370 struct queue *q) 2371 { 2372 struct kfd_process_device *pdd = kfd_get_process_device_data(q->device, 2373 q->process); 2374 int ret = 0; 2375 2376 if (pdd->qpd.is_debug) 2377 return ret; 2378 2379 q->properties.is_being_destroyed = true; 2380 2381 if (pdd->process->debug_trap_enabled && q->properties.is_suspended) { 2382 dqm_unlock(dqm); 2383 mutex_unlock(&q->process->mutex); 2384 ret = wait_event_interruptible(dqm->destroy_wait, 2385 !q->properties.is_suspended); 2386 2387 mutex_lock(&q->process->mutex); 2388 dqm_lock(dqm); 2389 } 2390 2391 return ret; 2392 } 2393 2394 static int destroy_queue_cpsch(struct device_queue_manager *dqm, 2395 struct qcm_process_device *qpd, 2396 struct queue *q) 2397 { 2398 int retval; 2399 struct mqd_manager *mqd_mgr; 2400 uint64_t sdma_val = 0; 2401 struct kfd_process_device *pdd = qpd_to_pdd(qpd); 2402 struct device *dev = dqm->dev->adev->dev; 2403 2404 /* Get the SDMA queue stats */ 2405 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) || 2406 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { 2407 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr, 2408 &sdma_val); 2409 if (retval) 2410 dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n", 2411 q->properties.queue_id); 2412 } 2413 2414 /* remove queue from list to prevent rescheduling after preemption */ 2415 dqm_lock(dqm); 2416 2417 retval = wait_on_destroy_queue(dqm, q); 2418 2419 if (retval) { 2420 dqm_unlock(dqm); 2421 return retval; 2422 } 2423 2424 if (qpd->is_debug) { 2425 /* 2426 * error, currently we do not allow to destroy a queue 2427 * of a currently debugged process 2428 */ 2429 retval = -EBUSY; 2430 goto 
failed_try_destroy_debugged_queue; 2431 2432 } 2433 2434 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( 2435 q->properties.type)]; 2436 2437 deallocate_doorbell(qpd, q); 2438 2439 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) || 2440 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { 2441 deallocate_sdma_queue(dqm, q); 2442 pdd->sdma_past_activity_counter += sdma_val; 2443 } 2444 2445 if (q->properties.is_active) { 2446 decrement_queue_count(dqm, qpd, q); 2447 q->properties.is_active = false; 2448 if (!dqm->dev->kfd->shared_resources.enable_mes) { 2449 retval = execute_queues_cpsch(dqm, 2450 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, 2451 USE_DEFAULT_GRACE_PERIOD); 2452 if (retval == -ETIME) 2453 qpd->reset_wavefronts = true; 2454 } else { 2455 retval = remove_queue_mes(dqm, q, qpd); 2456 } 2457 } 2458 list_del(&q->list); 2459 qpd->queue_count--; 2460 2461 /* 2462 * Unconditionally decrement this counter, regardless of the queue's 2463 * type 2464 */ 2465 dqm->total_queue_count--; 2466 pr_debug("Total of %d queues are accountable so far\n", 2467 dqm->total_queue_count); 2468 2469 dqm_unlock(dqm); 2470 2471 /* 2472 * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid 2473 * circular locking 2474 */ 2475 kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE), 2476 qpd->pqm->process, q->device, 2477 -1, false, NULL, 0); 2478 2479 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 2480 2481 return retval; 2482 2483 failed_try_destroy_debugged_queue: 2484 2485 dqm_unlock(dqm); 2486 return retval; 2487 } 2488 2489 /* 2490 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to 2491 * stay in user mode. 2492 */ 2493 #define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL 2494 /* APE1 limit is inclusive and 64K aligned. */ 2495 #define APE1_LIMIT_ALIGNMENT 0xFFFF 2496 2497 static bool set_cache_memory_policy(struct device_queue_manager *dqm, 2498 struct qcm_process_device *qpd, 2499 enum cache_policy default_policy, 2500 enum cache_policy alternate_policy, 2501 void __user *alternate_aperture_base, 2502 uint64_t alternate_aperture_size) 2503 { 2504 bool retval = true; 2505 2506 if (!dqm->asic_ops.set_cache_memory_policy) 2507 return retval; 2508 2509 dqm_lock(dqm); 2510 2511 if (alternate_aperture_size == 0) { 2512 /* base > limit disables APE1 */ 2513 qpd->sh_mem_ape1_base = 1; 2514 qpd->sh_mem_ape1_limit = 0; 2515 } else { 2516 /* 2517 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, 2518 * SH_MEM_APE1_BASE[31:0], 0x0000 } 2519 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, 2520 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } 2521 * Verify that the base and size parameters can be 2522 * represented in this format and convert them. 2523 * Additionally restrict APE1 to user-mode addresses. 
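* Worked example (illustrative values): base = 0x100000000 and size = 0x10000 give limit = 0x10000FFFF, which passes the checks below and is programmed as SH_MEM_APE1_BASE = 0x10000 and SH_MEM_APE1_LIMIT = 0x10000.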
2524 */ 2525 2526 uint64_t base = (uintptr_t)alternate_aperture_base; 2527 uint64_t limit = base + alternate_aperture_size - 1; 2528 2529 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || 2530 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { 2531 retval = false; 2532 goto out; 2533 } 2534 2535 qpd->sh_mem_ape1_base = base >> 16; 2536 qpd->sh_mem_ape1_limit = limit >> 16; 2537 } 2538 2539 retval = dqm->asic_ops.set_cache_memory_policy( 2540 dqm, 2541 qpd, 2542 default_policy, 2543 alternate_policy, 2544 alternate_aperture_base, 2545 alternate_aperture_size); 2546 2547 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) 2548 program_sh_mem_settings(dqm, qpd); 2549 2550 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n", 2551 qpd->sh_mem_config, qpd->sh_mem_ape1_base, 2552 qpd->sh_mem_ape1_limit); 2553 2554 out: 2555 dqm_unlock(dqm); 2556 return retval; 2557 } 2558 2559 static int process_termination_nocpsch(struct device_queue_manager *dqm, 2560 struct qcm_process_device *qpd) 2561 { 2562 struct queue *q; 2563 struct device_process_node *cur, *next_dpn; 2564 int retval = 0; 2565 bool found = false; 2566 2567 dqm_lock(dqm); 2568 2569 /* Clear all user mode queues */ 2570 while (!list_empty(&qpd->queues_list)) { 2571 struct mqd_manager *mqd_mgr; 2572 int ret; 2573 2574 q = list_first_entry(&qpd->queues_list, struct queue, list); 2575 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( 2576 q->properties.type)]; 2577 ret = destroy_queue_nocpsch_locked(dqm, qpd, q); 2578 if (ret) 2579 retval = ret; 2580 dqm_unlock(dqm); 2581 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 2582 dqm_lock(dqm); 2583 } 2584 2585 /* Unregister process */ 2586 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { 2587 if (qpd == cur->qpd) { 2588 list_del(&cur->list); 2589 kfree(cur); 2590 dqm->processes_count--; 2591 found = true; 2592 break; 2593 } 2594 } 2595 2596 dqm_unlock(dqm); 2597 2598 /* Outside the DQM lock because under the DQM lock we can't do 2599 * reclaim or take other locks that others hold while reclaiming. 2600 */ 2601 if (found) 2602 kfd_dec_compute_active(dqm->dev); 2603 2604 return retval; 2605 } 2606 2607 static int get_wave_state(struct device_queue_manager *dqm, 2608 struct queue *q, 2609 void __user *ctl_stack, 2610 u32 *ctl_stack_used_size, 2611 u32 *save_area_used_size) 2612 { 2613 struct mqd_manager *mqd_mgr; 2614 2615 dqm_lock(dqm); 2616 2617 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; 2618 2619 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || 2620 q->properties.is_active || !q->device->kfd->cwsr_enabled || 2621 !mqd_mgr->get_wave_state) { 2622 dqm_unlock(dqm); 2623 return -EINVAL; 2624 } 2625 2626 dqm_unlock(dqm); 2627 2628 /* 2629 * get_wave_state is outside the dqm lock to prevent circular locking 2630 * and the queue should be protected against destruction by the process 2631 * lock. 
2632 */ 2633 return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties, 2634 ctl_stack, ctl_stack_used_size, save_area_used_size); 2635 } 2636 2637 static void get_queue_checkpoint_info(struct device_queue_manager *dqm, 2638 const struct queue *q, 2639 u32 *mqd_size, 2640 u32 *ctl_stack_size) 2641 { 2642 struct mqd_manager *mqd_mgr; 2643 enum KFD_MQD_TYPE mqd_type = 2644 get_mqd_type_from_queue_type(q->properties.type); 2645 2646 dqm_lock(dqm); 2647 mqd_mgr = dqm->mqd_mgrs[mqd_type]; 2648 *mqd_size = mqd_mgr->mqd_size; 2649 *ctl_stack_size = 0; 2650 2651 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info) 2652 mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size); 2653 2654 dqm_unlock(dqm); 2655 } 2656 2657 static int checkpoint_mqd(struct device_queue_manager *dqm, 2658 const struct queue *q, 2659 void *mqd, 2660 void *ctl_stack) 2661 { 2662 struct mqd_manager *mqd_mgr; 2663 int r = 0; 2664 enum KFD_MQD_TYPE mqd_type = 2665 get_mqd_type_from_queue_type(q->properties.type); 2666 2667 dqm_lock(dqm); 2668 2669 if (q->properties.is_active || !q->device->kfd->cwsr_enabled) { 2670 r = -EINVAL; 2671 goto dqm_unlock; 2672 } 2673 2674 mqd_mgr = dqm->mqd_mgrs[mqd_type]; 2675 if (!mqd_mgr->checkpoint_mqd) { 2676 r = -EOPNOTSUPP; 2677 goto dqm_unlock; 2678 } 2679 2680 mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack); 2681 2682 dqm_unlock: 2683 dqm_unlock(dqm); 2684 return r; 2685 } 2686 2687 static int process_termination_cpsch(struct device_queue_manager *dqm, 2688 struct qcm_process_device *qpd) 2689 { 2690 int retval; 2691 struct queue *q; 2692 struct device *dev = dqm->dev->adev->dev; 2693 struct kernel_queue *kq, *kq_next; 2694 struct mqd_manager *mqd_mgr; 2695 struct device_process_node *cur, *next_dpn; 2696 enum kfd_unmap_queues_filter filter = 2697 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES; 2698 bool found = false; 2699 2700 retval = 0; 2701 2702 dqm_lock(dqm); 2703 2704 /* Clean all kernel queues */ 2705 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { 2706 list_del(&kq->list); 2707 decrement_queue_count(dqm, qpd, kq->queue); 2708 qpd->is_debug = false; 2709 dqm->total_queue_count--; 2710 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; 2711 } 2712 2713 /* Clear all user mode queues */ 2714 list_for_each_entry(q, &qpd->queues_list, list) { 2715 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 2716 deallocate_sdma_queue(dqm, q); 2717 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) 2718 deallocate_sdma_queue(dqm, q); 2719 2720 if (q->properties.is_active) { 2721 decrement_queue_count(dqm, qpd, q); 2722 2723 if (dqm->dev->kfd->shared_resources.enable_mes) { 2724 retval = remove_queue_mes(dqm, q, qpd); 2725 if (retval) 2726 dev_err(dev, "Failed to remove queue %d\n", 2727 q->properties.queue_id); 2728 } 2729 } 2730 2731 dqm->total_queue_count--; 2732 } 2733 2734 /* Unregister process */ 2735 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { 2736 if (qpd == cur->qpd) { 2737 list_del(&cur->list); 2738 kfree(cur); 2739 dqm->processes_count--; 2740 found = true; 2741 break; 2742 } 2743 } 2744 2745 if (!dqm->dev->kfd->shared_resources.enable_mes) 2746 retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD); 2747 2748 if ((retval || qpd->reset_wavefronts) && 2749 down_read_trylock(&dqm->dev->adev->reset_domain->sem)) { 2750 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev); 2751 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process); 2752 qpd->reset_wavefronts = false; 2753 
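/* wavefront reset is done; release the reset-domain read lock taken by the trylock above */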
up_read(&dqm->dev->adev->reset_domain->sem); 2754 } 2755 2756 /* Lastly, free mqd resources. 2757 * Do free_mqd() after dqm_unlock to avoid circular locking. 2758 */ 2759 while (!list_empty(&qpd->queues_list)) { 2760 q = list_first_entry(&qpd->queues_list, struct queue, list); 2761 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( 2762 q->properties.type)]; 2763 list_del(&q->list); 2764 qpd->queue_count--; 2765 dqm_unlock(dqm); 2766 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 2767 dqm_lock(dqm); 2768 } 2769 dqm_unlock(dqm); 2770 2771 /* Outside the DQM lock because under the DQM lock we can't do 2772 * reclaim or take other locks that others hold while reclaiming. 2773 */ 2774 if (found) 2775 kfd_dec_compute_active(dqm->dev); 2776 2777 return retval; 2778 } 2779 2780 static int init_mqd_managers(struct device_queue_manager *dqm) 2781 { 2782 int i, j; 2783 struct device *dev = dqm->dev->adev->dev; 2784 struct mqd_manager *mqd_mgr; 2785 2786 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) { 2787 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev); 2788 if (!mqd_mgr) { 2789 dev_err(dev, "mqd manager [%d] initialization failed\n", i); 2790 goto out_free; 2791 } 2792 dqm->mqd_mgrs[i] = mqd_mgr; 2793 } 2794 2795 return 0; 2796 2797 out_free: 2798 for (j = 0; j < i; j++) { 2799 kfree(dqm->mqd_mgrs[j]); 2800 dqm->mqd_mgrs[j] = NULL; 2801 } 2802 2803 return -ENOMEM; 2804 } 2805 2806 /* Allocate one hiq mqd (HWS) and all SDMA mqd in a continuous trunk*/ 2807 static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) 2808 { 2809 int retval; 2810 struct kfd_node *dev = dqm->dev; 2811 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; 2812 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * 2813 get_num_all_sdma_engines(dqm) * 2814 dev->kfd->device_info.num_sdma_queues_per_engine + 2815 (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * 2816 NUM_XCC(dqm->dev->xcc_mask)); 2817 2818 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, 2819 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), 2820 (void *)&(mem_obj->cpu_ptr), false); 2821 2822 return retval; 2823 } 2824 2825 struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) 2826 { 2827 struct device_queue_manager *dqm; 2828 2829 pr_debug("Loading device queue manager\n"); 2830 2831 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL); 2832 if (!dqm) 2833 return NULL; 2834 2835 switch (dev->adev->asic_type) { 2836 /* HWS is not available on Hawaii. */ 2837 case CHIP_HAWAII: 2838 /* HWS depends on CWSR for timely dequeue. CWSR is not 2839 * available on Tonga. 2840 * 2841 * FIXME: This argument also applies to Kaveri. 
2842 */ 2843 case CHIP_TONGA: 2844 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS; 2845 break; 2846 default: 2847 dqm->sched_policy = sched_policy; 2848 break; 2849 } 2850 2851 dqm->dev = dev; 2852 switch (dqm->sched_policy) { 2853 case KFD_SCHED_POLICY_HWS: 2854 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: 2855 /* initialize dqm for cp scheduling */ 2856 dqm->ops.create_queue = create_queue_cpsch; 2857 dqm->ops.initialize = initialize_cpsch; 2858 dqm->ops.start = start_cpsch; 2859 dqm->ops.stop = stop_cpsch; 2860 dqm->ops.halt = halt_cpsch; 2861 dqm->ops.unhalt = unhalt_cpsch; 2862 dqm->ops.destroy_queue = destroy_queue_cpsch; 2863 dqm->ops.update_queue = update_queue; 2864 dqm->ops.register_process = register_process; 2865 dqm->ops.unregister_process = unregister_process; 2866 dqm->ops.uninitialize = uninitialize; 2867 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch; 2868 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch; 2869 dqm->ops.set_cache_memory_policy = set_cache_memory_policy; 2870 dqm->ops.process_termination = process_termination_cpsch; 2871 dqm->ops.evict_process_queues = evict_process_queues_cpsch; 2872 dqm->ops.restore_process_queues = restore_process_queues_cpsch; 2873 dqm->ops.get_wave_state = get_wave_state; 2874 dqm->ops.reset_queues = reset_queues_cpsch; 2875 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info; 2876 dqm->ops.checkpoint_mqd = checkpoint_mqd; 2877 break; 2878 case KFD_SCHED_POLICY_NO_HWS: 2879 /* initialize dqm for no cp scheduling */ 2880 dqm->ops.start = start_nocpsch; 2881 dqm->ops.stop = stop_nocpsch; 2882 dqm->ops.create_queue = create_queue_nocpsch; 2883 dqm->ops.destroy_queue = destroy_queue_nocpsch; 2884 dqm->ops.update_queue = update_queue; 2885 dqm->ops.register_process = register_process; 2886 dqm->ops.unregister_process = unregister_process; 2887 dqm->ops.initialize = initialize_nocpsch; 2888 dqm->ops.uninitialize = uninitialize; 2889 dqm->ops.set_cache_memory_policy = set_cache_memory_policy; 2890 dqm->ops.process_termination = process_termination_nocpsch; 2891 dqm->ops.evict_process_queues = evict_process_queues_nocpsch; 2892 dqm->ops.restore_process_queues = 2893 restore_process_queues_nocpsch; 2894 dqm->ops.get_wave_state = get_wave_state; 2895 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info; 2896 dqm->ops.checkpoint_mqd = checkpoint_mqd; 2897 break; 2898 default: 2899 dev_err(dev->adev->dev, "Invalid scheduling policy %d\n", dqm->sched_policy); 2900 goto out_free; 2901 } 2902 2903 switch (dev->adev->asic_type) { 2904 case CHIP_KAVERI: 2905 case CHIP_HAWAII: 2906 device_queue_manager_init_cik(&dqm->asic_ops); 2907 break; 2908 2909 case CHIP_CARRIZO: 2910 case CHIP_TONGA: 2911 case CHIP_FIJI: 2912 case CHIP_POLARIS10: 2913 case CHIP_POLARIS11: 2914 case CHIP_POLARIS12: 2915 case CHIP_VEGAM: 2916 device_queue_manager_init_vi(&dqm->asic_ops); 2917 break; 2918 2919 default: 2920 if (KFD_GC_VERSION(dev) >= IP_VERSION(12, 0, 0)) 2921 device_queue_manager_init_v12(&dqm->asic_ops); 2922 else if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0)) 2923 device_queue_manager_init_v11(&dqm->asic_ops); 2924 else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) 2925 device_queue_manager_init_v10(&dqm->asic_ops); 2926 else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) 2927 device_queue_manager_init_v9(&dqm->asic_ops); 2928 else { 2929 WARN(1, "Unexpected ASIC family %u", 2930 dev->adev->asic_type); 2931 goto out_free; 2932 } 2933 } 2934 2935 if (init_mqd_managers(dqm)) 2936 goto out_free; 2937 2938 if 
(!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { 2939 dev_err(dev->adev->dev, "Failed to allocate hiq sdma mqd trunk buffer\n"); 2940 goto out_free; 2941 } 2942 2943 if (!dqm->ops.initialize(dqm)) { 2944 init_waitqueue_head(&dqm->destroy_wait); 2945 return dqm; 2946 } 2947 2948 out_free: 2949 kfree(dqm); 2950 return NULL; 2951 } 2952 2953 static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, 2954 struct kfd_mem_obj *mqd) 2955 { 2956 WARN(!mqd, "No hiq sdma mqd trunk to free"); 2957 2958 amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem); 2959 } 2960 2961 void device_queue_manager_uninit(struct device_queue_manager *dqm) 2962 { 2963 dqm->ops.stop(dqm); 2964 dqm->ops.uninitialize(dqm); 2965 if (!dqm->dev->kfd->shared_resources.enable_mes) 2966 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd); 2967 kfree(dqm); 2968 } 2969 2970 int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id) 2971 { 2972 struct kfd_process_device *pdd; 2973 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); 2974 struct device_queue_manager *dqm = knode->dqm; 2975 struct device *dev = dqm->dev->adev->dev; 2976 struct qcm_process_device *qpd; 2977 struct queue *q = NULL; 2978 int ret = 0; 2979 2980 if (!p) 2981 return -EINVAL; 2982 2983 dqm_lock(dqm); 2984 2985 pdd = kfd_get_process_device_data(dqm->dev, p); 2986 if (pdd) { 2987 qpd = &pdd->qpd; 2988 2989 list_for_each_entry(q, &qpd->queues_list, list) { 2990 if (q->doorbell_id == doorbell_id && q->properties.is_active) { 2991 ret = suspend_all_queues_mes(dqm); 2992 if (ret) { 2993 dev_err(dev, "Suspending all queues failed"); 2994 goto out; 2995 } 2996 2997 q->properties.is_evicted = true; 2998 q->properties.is_active = false; 2999 decrement_queue_count(dqm, qpd, q); 3000 3001 ret = remove_queue_mes(dqm, q, qpd); 3002 if (ret) { 3003 dev_err(dev, "Removing bad queue failed"); 3004 goto out; 3005 } 3006 3007 ret = resume_all_queues_mes(dqm); 3008 if (ret) 3009 dev_err(dev, "Resuming all queues failed"); 3010 3011 break; 3012 } 3013 } 3014 } 3015 3016 out: 3017 dqm_unlock(dqm); 3018 return ret; 3019 } 3020 3021 static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm, 3022 struct qcm_process_device *qpd) 3023 { 3024 struct device *dev = dqm->dev->adev->dev; 3025 int ret = 0; 3026 3027 /* Check if process is already evicted */ 3028 dqm_lock(dqm); 3029 if (qpd->evicted) { 3030 /* Increment the evicted count to make sure the 3031 * process stays evicted before its terminated. 
3032 */ 3033 qpd->evicted++; 3034 dqm_unlock(dqm); 3035 goto out; 3036 } 3037 dqm_unlock(dqm); 3038 3039 ret = suspend_all_queues_mes(dqm); 3040 if (ret) { 3041 dev_err(dev, "Suspending all queues failed"); 3042 goto out; 3043 } 3044 3045 ret = dqm->ops.evict_process_queues(dqm, qpd); 3046 if (ret) { 3047 dev_err(dev, "Evicting process queues failed"); 3048 goto out; 3049 } 3050 3051 ret = resume_all_queues_mes(dqm); 3052 if (ret) 3053 dev_err(dev, "Resuming all queues failed"); 3054 3055 out: 3056 return ret; 3057 } 3058 3059 int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid) 3060 { 3061 struct kfd_process_device *pdd; 3062 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); 3063 int ret = 0; 3064 3065 if (!p) 3066 return -EINVAL; 3067 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); 3068 pdd = kfd_get_process_device_data(dqm->dev, p); 3069 if (pdd) { 3070 if (dqm->dev->kfd->shared_resources.enable_mes) 3071 ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd); 3072 else 3073 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); 3074 } 3075 3076 kfd_unref_process(p); 3077 3078 return ret; 3079 } 3080 3081 static void kfd_process_hw_exception(struct work_struct *work) 3082 { 3083 struct device_queue_manager *dqm = container_of(work, 3084 struct device_queue_manager, hw_exception_work); 3085 amdgpu_amdkfd_gpu_reset(dqm->dev->adev); 3086 } 3087 3088 int reserve_debug_trap_vmid(struct device_queue_manager *dqm, 3089 struct qcm_process_device *qpd) 3090 { 3091 int r; 3092 struct device *dev = dqm->dev->adev->dev; 3093 int updated_vmid_mask; 3094 3095 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { 3096 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy); 3097 return -EINVAL; 3098 } 3099 3100 dqm_lock(dqm); 3101 3102 if (dqm->trap_debug_vmid != 0) { 3103 dev_err(dev, "Trap debug id already reserved\n"); 3104 r = -EBUSY; 3105 goto out_unlock; 3106 } 3107 3108 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 3109 USE_DEFAULT_GRACE_PERIOD, false); 3110 if (r) 3111 goto out_unlock; 3112 3113 updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; 3114 updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd); 3115 3116 dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; 3117 dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd; 3118 r = set_sched_resources(dqm); 3119 if (r) 3120 goto out_unlock; 3121 3122 r = map_queues_cpsch(dqm); 3123 if (r) 3124 goto out_unlock; 3125 3126 pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid); 3127 3128 out_unlock: 3129 dqm_unlock(dqm); 3130 return r; 3131 } 3132 3133 /* 3134 * Releases vmid for the trap debugger 3135 */ 3136 int release_debug_trap_vmid(struct device_queue_manager *dqm, 3137 struct qcm_process_device *qpd) 3138 { 3139 struct device *dev = dqm->dev->adev->dev; 3140 int r; 3141 int updated_vmid_mask; 3142 uint32_t trap_debug_vmid; 3143 3144 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { 3145 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy); 3146 return -EINVAL; 3147 } 3148 3149 dqm_lock(dqm); 3150 trap_debug_vmid = dqm->trap_debug_vmid; 3151 if (dqm->trap_debug_vmid == 0) { 3152 dev_err(dev, "Trap debug id is not reserved\n"); 3153 r = -EINVAL; 3154 goto out_unlock; 3155 } 3156 3157 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 3158 USE_DEFAULT_GRACE_PERIOD, false); 3159 if (r) 3160 goto out_unlock; 3161 3162 updated_vmid_mask = 
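/* return the VMID reserved for the trap debugger to the compute VMID pool */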
dqm->dev->kfd->shared_resources.compute_vmid_bitmap; 3163 updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd); 3164 3165 dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; 3166 dqm->trap_debug_vmid = 0; 3167 r = set_sched_resources(dqm); 3168 if (r) 3169 goto out_unlock; 3170 3171 r = map_queues_cpsch(dqm); 3172 if (r) 3173 goto out_unlock; 3174 3175 pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid); 3176 3177 out_unlock: 3178 dqm_unlock(dqm); 3179 return r; 3180 } 3181 3182 #define QUEUE_NOT_FOUND -1 3183 /* invalidate queue operation in array */ 3184 static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids) 3185 { 3186 int i; 3187 3188 for (i = 0; i < num_queues; i++) 3189 queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK; 3190 } 3191 3192 /* find queue index in array */ 3193 static int q_array_get_index(unsigned int queue_id, 3194 uint32_t num_queues, 3195 uint32_t *queue_ids) 3196 { 3197 int i; 3198 3199 for (i = 0; i < num_queues; i++) 3200 if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK)) 3201 return i; 3202 3203 return QUEUE_NOT_FOUND; 3204 } 3205 3206 struct copy_context_work_handler_workarea { 3207 struct work_struct copy_context_work; 3208 struct kfd_process *p; 3209 }; 3210 3211 static void copy_context_work_handler(struct work_struct *work) 3212 { 3213 struct copy_context_work_handler_workarea *workarea; 3214 struct mqd_manager *mqd_mgr; 3215 struct queue *q; 3216 struct mm_struct *mm; 3217 struct kfd_process *p; 3218 uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size; 3219 int i; 3220 3221 workarea = container_of(work, 3222 struct copy_context_work_handler_workarea, 3223 copy_context_work); 3224 3225 p = workarea->p; 3226 mm = get_task_mm(p->lead_thread); 3227 3228 if (!mm) 3229 return; 3230 3231 kthread_use_mm(mm); 3232 for (i = 0; i < p->n_pdds; i++) { 3233 struct kfd_process_device *pdd = p->pdds[i]; 3234 struct device_queue_manager *dqm = pdd->dev->dqm; 3235 struct qcm_process_device *qpd = &pdd->qpd; 3236 3237 list_for_each_entry(q, &qpd->queues_list, list) { 3238 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE) 3239 continue; 3240 3241 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; 3242 3243 /* We ignore the return value from get_wave_state 3244 * because 3245 * i) right now, it always returns 0, and 3246 * ii) if we hit an error, we would continue to the 3247 * next queue anyway. 3248 */ 3249 mqd_mgr->get_wave_state(mqd_mgr, 3250 q->mqd, 3251 &q->properties, 3252 (void __user *) q->properties.ctx_save_restore_area_address, 3253 &tmp_ctl_stack_used_size, 3254 &tmp_save_area_used_size); 3255 } 3256 } 3257 kthread_unuse_mm(mm); 3258 mmput(mm); 3259 } 3260 3261 static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array) 3262 { 3263 size_t array_size = num_queues * sizeof(uint32_t); 3264 3265 if (!usr_queue_id_array) 3266 return NULL; 3267 3268 return memdup_user(usr_queue_id_array, array_size); 3269 } 3270 3271 int resume_queues(struct kfd_process *p, 3272 uint32_t num_queues, 3273 uint32_t *usr_queue_id_array) 3274 { 3275 uint32_t *queue_ids = NULL; 3276 int total_resumed = 0; 3277 int i; 3278 3279 if (usr_queue_id_array) { 3280 queue_ids = get_queue_ids(num_queues, usr_queue_id_array); 3281 3282 if (IS_ERR(queue_ids)) 3283 return PTR_ERR(queue_ids); 3284 3285 /* mask all queues as invalid. 
unmask per successful request */ 3286 q_array_invalidate(num_queues, queue_ids); 3287 } 3288 3289 for (i = 0; i < p->n_pdds; i++) { 3290 struct kfd_process_device *pdd = p->pdds[i]; 3291 struct device_queue_manager *dqm = pdd->dev->dqm; 3292 struct device *dev = dqm->dev->adev->dev; 3293 struct qcm_process_device *qpd = &pdd->qpd; 3294 struct queue *q; 3295 int r, per_device_resumed = 0; 3296 3297 dqm_lock(dqm); 3298 3299 /* unmask queues that resume or already resumed as valid */ 3300 list_for_each_entry(q, &qpd->queues_list, list) { 3301 int q_idx = QUEUE_NOT_FOUND; 3302 3303 if (queue_ids) 3304 q_idx = q_array_get_index( 3305 q->properties.queue_id, 3306 num_queues, 3307 queue_ids); 3308 3309 if (!queue_ids || q_idx != QUEUE_NOT_FOUND) { 3310 int err = resume_single_queue(dqm, &pdd->qpd, q); 3311 3312 if (queue_ids) { 3313 if (!err) { 3314 queue_ids[q_idx] &= 3315 ~KFD_DBG_QUEUE_INVALID_MASK; 3316 } else { 3317 queue_ids[q_idx] |= 3318 KFD_DBG_QUEUE_ERROR_MASK; 3319 break; 3320 } 3321 } 3322 3323 if (dqm->dev->kfd->shared_resources.enable_mes) { 3324 wake_up_all(&dqm->destroy_wait); 3325 if (!err) 3326 total_resumed++; 3327 } else { 3328 per_device_resumed++; 3329 } 3330 } 3331 } 3332 3333 if (!per_device_resumed) { 3334 dqm_unlock(dqm); 3335 continue; 3336 } 3337 3338 r = execute_queues_cpsch(dqm, 3339 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 3340 0, 3341 USE_DEFAULT_GRACE_PERIOD); 3342 if (r) { 3343 dev_err(dev, "Failed to resume process queues\n"); 3344 if (queue_ids) { 3345 list_for_each_entry(q, &qpd->queues_list, list) { 3346 int q_idx = q_array_get_index( 3347 q->properties.queue_id, 3348 num_queues, 3349 queue_ids); 3350 3351 /* mask queue as error on resume fail */ 3352 if (q_idx != QUEUE_NOT_FOUND) 3353 queue_ids[q_idx] |= 3354 KFD_DBG_QUEUE_ERROR_MASK; 3355 } 3356 } 3357 } else { 3358 wake_up_all(&dqm->destroy_wait); 3359 total_resumed += per_device_resumed; 3360 } 3361 3362 dqm_unlock(dqm); 3363 } 3364 3365 if (queue_ids) { 3366 if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, 3367 num_queues * sizeof(uint32_t))) 3368 pr_err("copy_to_user failed on queue resume\n"); 3369 3370 kfree(queue_ids); 3371 } 3372 3373 return total_resumed; 3374 } 3375 3376 int suspend_queues(struct kfd_process *p, 3377 uint32_t num_queues, 3378 uint32_t grace_period, 3379 uint64_t exception_clear_mask, 3380 uint32_t *usr_queue_id_array) 3381 { 3382 uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array); 3383 int total_suspended = 0; 3384 int i; 3385 3386 if (IS_ERR(queue_ids)) 3387 return PTR_ERR(queue_ids); 3388 3389 /* mask all queues as invalid. 
unmask on successful request */ 3390 q_array_invalidate(num_queues, queue_ids); 3391 3392 for (i = 0; i < p->n_pdds; i++) { 3393 struct kfd_process_device *pdd = p->pdds[i]; 3394 struct device_queue_manager *dqm = pdd->dev->dqm; 3395 struct device *dev = dqm->dev->adev->dev; 3396 struct qcm_process_device *qpd = &pdd->qpd; 3397 struct queue *q; 3398 int r, per_device_suspended = 0; 3399 3400 mutex_lock(&p->event_mutex); 3401 dqm_lock(dqm); 3402 3403 /* unmask queues that suspend or already suspended */ 3404 list_for_each_entry(q, &qpd->queues_list, list) { 3405 int q_idx = q_array_get_index(q->properties.queue_id, 3406 num_queues, 3407 queue_ids); 3408 3409 if (q_idx != QUEUE_NOT_FOUND) { 3410 int err = suspend_single_queue(dqm, pdd, q); 3411 bool is_mes = dqm->dev->kfd->shared_resources.enable_mes; 3412 3413 if (!err) { 3414 queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK; 3415 if (exception_clear_mask && is_mes) 3416 q->properties.exception_status &= 3417 ~exception_clear_mask; 3418 3419 if (is_mes) 3420 total_suspended++; 3421 else 3422 per_device_suspended++; 3423 } else if (err != -EBUSY) { 3424 r = err; 3425 queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; 3426 break; 3427 } 3428 } 3429 } 3430 3431 if (!per_device_suspended) { 3432 dqm_unlock(dqm); 3433 mutex_unlock(&p->event_mutex); 3434 if (total_suspended) 3435 amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev); 3436 continue; 3437 } 3438 3439 r = execute_queues_cpsch(dqm, 3440 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, 3441 grace_period); 3442 3443 if (r) 3444 dev_err(dev, "Failed to suspend process queues.\n"); 3445 else 3446 total_suspended += per_device_suspended; 3447 3448 list_for_each_entry(q, &qpd->queues_list, list) { 3449 int q_idx = q_array_get_index(q->properties.queue_id, 3450 num_queues, queue_ids); 3451 3452 if (q_idx == QUEUE_NOT_FOUND) 3453 continue; 3454 3455 /* mask queue as error on suspend fail */ 3456 if (r) 3457 queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; 3458 else if (exception_clear_mask) 3459 q->properties.exception_status &= 3460 ~exception_clear_mask; 3461 } 3462 3463 dqm_unlock(dqm); 3464 mutex_unlock(&p->event_mutex); 3465 amdgpu_device_flush_hdp(dqm->dev->adev, NULL); 3466 } 3467 3468 if (total_suspended) { 3469 struct copy_context_work_handler_workarea copy_context_worker; 3470 3471 INIT_WORK_ONSTACK( 3472 &copy_context_worker.copy_context_work, 3473 copy_context_work_handler); 3474 3475 copy_context_worker.p = p; 3476 3477 schedule_work(&copy_context_worker.copy_context_work); 3478 3479 3480 flush_work(&copy_context_worker.copy_context_work); 3481 destroy_work_on_stack(&copy_context_worker.copy_context_work); 3482 } 3483 3484 if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, 3485 num_queues * sizeof(uint32_t))) 3486 pr_err("copy_to_user failed on queue suspend\n"); 3487 3488 kfree(queue_ids); 3489 3490 return total_suspended; 3491 } 3492 3493 static uint32_t set_queue_type_for_user(struct queue_properties *q_props) 3494 { 3495 switch (q_props->type) { 3496 case KFD_QUEUE_TYPE_COMPUTE: 3497 return q_props->format == KFD_QUEUE_FORMAT_PM4 3498 ?
KFD_IOC_QUEUE_TYPE_COMPUTE 3499 : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL; 3500 case KFD_QUEUE_TYPE_SDMA: 3501 return KFD_IOC_QUEUE_TYPE_SDMA; 3502 case KFD_QUEUE_TYPE_SDMA_XGMI: 3503 return KFD_IOC_QUEUE_TYPE_SDMA_XGMI; 3504 default: 3505 WARN_ONCE(true, "queue type not recognized!"); 3506 return 0xffffffff; 3507 }; 3508 } 3509 3510 void set_queue_snapshot_entry(struct queue *q, 3511 uint64_t exception_clear_mask, 3512 struct kfd_queue_snapshot_entry *qss_entry) 3513 { 3514 qss_entry->ring_base_address = q->properties.queue_address; 3515 qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr; 3516 qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr; 3517 qss_entry->ctx_save_restore_address = 3518 q->properties.ctx_save_restore_area_address; 3519 qss_entry->ctx_save_restore_area_size = 3520 q->properties.ctx_save_restore_area_size; 3521 qss_entry->exception_status = q->properties.exception_status; 3522 qss_entry->queue_id = q->properties.queue_id; 3523 qss_entry->gpu_id = q->device->id; 3524 qss_entry->ring_size = (uint32_t)q->properties.queue_size; 3525 qss_entry->queue_type = set_queue_type_for_user(&q->properties); 3526 q->properties.exception_status &= ~exception_clear_mask; 3527 } 3528 3529 int debug_lock_and_unmap(struct device_queue_manager *dqm) 3530 { 3531 struct device *dev = dqm->dev->adev->dev; 3532 int r; 3533 3534 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { 3535 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy); 3536 return -EINVAL; 3537 } 3538 3539 if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) 3540 return 0; 3541 3542 dqm_lock(dqm); 3543 3544 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false); 3545 if (r) 3546 dqm_unlock(dqm); 3547 3548 return r; 3549 } 3550 3551 int debug_map_and_unlock(struct device_queue_manager *dqm) 3552 { 3553 struct device *dev = dqm->dev->adev->dev; 3554 int r; 3555 3556 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { 3557 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy); 3558 return -EINVAL; 3559 } 3560 3561 if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) 3562 return 0; 3563 3564 r = map_queues_cpsch(dqm); 3565 3566 dqm_unlock(dqm); 3567 3568 return r; 3569 } 3570 3571 int debug_refresh_runlist(struct device_queue_manager *dqm) 3572 { 3573 int r = debug_lock_and_unmap(dqm); 3574 3575 if (r) 3576 return r; 3577 3578 return debug_map_and_unlock(dqm); 3579 } 3580 3581 bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm, 3582 struct qcm_process_device *qpd, 3583 int doorbell_off, u32 *queue_format) 3584 { 3585 struct queue *q; 3586 bool r = false; 3587 3588 if (!queue_format) 3589 return r; 3590 3591 dqm_lock(dqm); 3592 3593 list_for_each_entry(q, &qpd->queues_list, list) { 3594 if (q->properties.doorbell_off == doorbell_off) { 3595 *queue_format = q->properties.format; 3596 r = true; 3597 goto out; 3598 } 3599 } 3600 3601 out: 3602 dqm_unlock(dqm); 3603 return r; 3604 } 3605 #if defined(CONFIG_DEBUG_FS) 3606 3607 static void seq_reg_dump(struct seq_file *m, 3608 uint32_t (*dump)[2], uint32_t n_regs) 3609 { 3610 uint32_t i, count; 3611 3612 for (i = 0, count = 0; i < n_regs; i++) { 3613 if (count == 0 || 3614 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) { 3615 seq_printf(m, "%s %08x: %08x", 3616 i ? 
"\n" : "", 3617 dump[i][0], dump[i][1]); 3618 count = 7; 3619 } else { 3620 seq_printf(m, " %08x", dump[i][1]); 3621 count--; 3622 } 3623 } 3624 3625 seq_puts(m, "\n"); 3626 } 3627 3628 int dqm_debugfs_hqds(struct seq_file *m, void *data) 3629 { 3630 struct device_queue_manager *dqm = data; 3631 uint32_t xcc_mask = dqm->dev->xcc_mask; 3632 uint32_t (*dump)[2], n_regs; 3633 int pipe, queue; 3634 int r = 0, xcc_id; 3635 uint32_t sdma_engine_start; 3636 3637 if (!dqm->sched_running) { 3638 seq_puts(m, " Device is stopped\n"); 3639 return 0; 3640 } 3641 3642 for_each_inst(xcc_id, xcc_mask) { 3643 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, 3644 KFD_CIK_HIQ_PIPE, 3645 KFD_CIK_HIQ_QUEUE, &dump, 3646 &n_regs, xcc_id); 3647 if (!r) { 3648 seq_printf( 3649 m, 3650 " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n", 3651 xcc_id, 3652 KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1, 3653 KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm), 3654 KFD_CIK_HIQ_QUEUE); 3655 seq_reg_dump(m, dump, n_regs); 3656 3657 kfree(dump); 3658 } 3659 3660 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { 3661 int pipe_offset = pipe * get_queues_per_pipe(dqm); 3662 3663 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { 3664 if (!test_bit(pipe_offset + queue, 3665 dqm->dev->kfd->shared_resources.cp_queue_bitmap)) 3666 continue; 3667 3668 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, 3669 pipe, queue, 3670 &dump, &n_regs, 3671 xcc_id); 3672 if (r) 3673 break; 3674 3675 seq_printf(m, 3676 " Inst %d, CP Pipe %d, Queue %d\n", 3677 xcc_id, pipe, queue); 3678 seq_reg_dump(m, dump, n_regs); 3679 3680 kfree(dump); 3681 } 3682 } 3683 } 3684 3685 sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm); 3686 for (pipe = sdma_engine_start; 3687 pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm)); 3688 pipe++) { 3689 for (queue = 0; 3690 queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine; 3691 queue++) { 3692 r = dqm->dev->kfd2kgd->hqd_sdma_dump( 3693 dqm->dev->adev, pipe, queue, &dump, &n_regs); 3694 if (r) 3695 break; 3696 3697 seq_printf(m, " SDMA Engine %d, RLC %d\n", 3698 pipe, queue); 3699 seq_reg_dump(m, dump, n_regs); 3700 3701 kfree(dump); 3702 } 3703 } 3704 3705 return r; 3706 } 3707 3708 int dqm_debugfs_hang_hws(struct device_queue_manager *dqm) 3709 { 3710 int r = 0; 3711 3712 dqm_lock(dqm); 3713 r = pm_debugfs_hang_hws(&dqm->packet_mgr); 3714 if (r) { 3715 dqm_unlock(dqm); 3716 return r; 3717 } 3718 dqm->active_runlist = true; 3719 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 3720 0, USE_DEFAULT_GRACE_PERIOD); 3721 dqm_unlock(dqm); 3722 3723 return r; 3724 } 3725 3726 #endif 3727