// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"

static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}

static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
				    unsigned int qid)
{
	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		return -EINVAL;

	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
		return -ENOSPC;
	}

	return 0;
}

static int find_available_queue_slot(struct process_queue_manager *pqm,
				     unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
				    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pid %d\n",
			pqm->process->lead_thread->pid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}

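/*
 * kfd_process_dequeue_from_device - remove all of a process' queues from
 * one device. Idempotent: the already_dequeued flag makes repeated calls
 * a no-op.
 */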
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;

	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);

	/* The MES context flush must be skipped if the KFD process was
	 * created without ever setting up a MES context and queue, i.e.
	 * no compute queue was created and proc_ctx_gpu_addr is still 0.
	 */
	if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
	    down_read_trylock(&dev->adev->reset_domain->sem)) {
		amdgpu_mes_flush_shader_debugger(dev->adev,
						 pdd->proc_ctx_gpu_addr,
						 ffs(pdd->dev->xcc_mask) - 1);
		up_read(&dev->adev->reset_domain->sem);
	}
	pdd->already_dequeued = true;
}

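/*
 * pqm_set_gws - assign or release the device's GWS (global wave sync)
 * allocation for a user queue
 * @pqm: process queue manager of the calling process
 * @qid: queue id of the target queue
 * @gws: GWS handle to assign, or NULL to release the current assignment
 *
 * Only one queue per process may hold the GWS allocation at any time.
 */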
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
		void *gws)
{
	struct mqd_update_info minfo = {0};
	struct kfd_node *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only allow one queue per process to have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if ((KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
	     KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
	     KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0)) &&
	    !dev->kfd->shared_resources.enable_mes) {
		if (gws)
			ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
							       gws, &mem);
		else
			ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
								    pqn->q->gws);
		if (unlikely(ret))
			return ret;
		pqn->q->gws = mem;
	} else {
		/*
		 * Intentionally set GWS to a non-NULL value
		 * for devices that do not use GWS for global wave
		 * synchronization but require the formality
		 * of setting GWS for cooperative groups.
		 */
		pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
	}

	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
	minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
						     pqn->q, &minfo);
}

void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_dequeue_from_device(p->pdds[i]);
}

int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					       GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}

static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
				     struct process_queue_node *pqn)
{
	struct kfd_node *dev;
	struct kfd_process_device *pdd;

	dev = pqn->q->device;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return;
	}

	if (pqn->q->gws) {
		if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) &&
		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 5, 0) &&
		    !dev->kfd->shared_resources.enable_mes)
			amdgpu_amdkfd_remove_gws_from_process(
				pqm->process->kgd_process_info, pqn->q->gws);
		pdd->qpd.num_gws = 0;
	}

	if (dev->kfd->shared_resources.enable_mes) {
		amdgpu_amdkfd_free_kernel_mem(dev->adev, &pqn->q->gang_ctx_bo);
		amdgpu_amdkfd_free_kernel_mem(dev->adev, (void **)&pqn->q->wptr_bo_gart);
	}
}

void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
										      pqm->process);
			if (pdd) {
				kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
				kfd_queue_release_buffers(pdd, &pqn->q->properties);
			} else {
				WARN_ON(!pdd);
			}
			pqm_clean_queue_resource(pqm, pqn);
		}

		kfd_procfs_del_queue(pqn->q);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	bitmap_free(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}

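/*
 * init_user_queue - allocate and initialize a user-mode queue object.
 * On MES-enabled devices this also allocates the gang context BO and maps
 * the user write-pointer BO into GART, since MES reads the wptr of
 * unmapped queues directly (see the comment in the function body).
 */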
static int init_user_queue(struct process_queue_manager *pqm,
			   struct kfd_node *dev, struct queue **q,
			   struct queue_properties *q_properties,
			   unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;
	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);

	/* let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_kernel_mem(dev->adev,
							AMDGPU_MES_GANG_CTX_SIZE,
							AMDGPU_GEM_DOMAIN_GTT,
							&(*q)->gang_ctx_bo,
							&(*q)->gang_ctx_gpu_addr,
							&(*q)->gang_ctx_cpu_ptr,
							false);
		if (retval) {
			pr_err("failed to allocate gang context bo\n");
			goto cleanup;
		}
		memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

		/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
		 * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
		 */
		if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
			pr_err("Queue memory allocated to wrong device\n");
			retval = -EINVAL;
			goto free_gang_ctx_bo;
		}

		retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
							  &(*q)->wptr_bo_gart);
		if (retval) {
			pr_err("Failed to map wptr bo to GART\n");
			goto free_gang_ctx_bo;
		}
	}

	pr_debug("PQM After init queue");
	return 0;

free_gang_ctx_bo:
	amdgpu_amdkfd_free_kernel_mem(dev->adev, &(*q)->gang_ctx_bo);
cleanup:
	uninit_queue(*q);
	*q = NULL;
	return retval;
}

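/*
 * pqm_create_queue - create a queue for the calling process on @dev,
 * typically reached from the KFD create-queue ioctl. On the CRIU restore
 * path, @q_data, @restore_mqd and @restore_ctl_stack carry the
 * checkpointed state and the saved queue id is reused; otherwise a free
 * slot is taken from the per-process bitmap. For user queues,
 * *p_doorbell_offset_in_process returns the doorbell offset within the
 * process doorbell page, in bytes.
 */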
int pqm_create_queue(struct process_queue_manager *pqm,
		     struct kfd_node *dev,
		     struct queue_properties *properties,
		     unsigned int *qid,
		     const struct kfd_criu_queue_priv_data *q_data,
		     const void *restore_mqd,
		     const void *restore_ctl_stack,
		     uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	/*
	 * On GFX 9.4.3/9.4.4/9.5.0, increase the number of queues that
	 * can be created to 255. There is no HWS limit on these parts.
	 */
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
	    KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
	    KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0))
		max_queues = 255;

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -ENODEV;
	}

	/*
	 * For a debug process, verify that it is within the static queues
	 * limit. The limit is currently set to half of the total available
	 * HQD slots.
	 */
	if (pdd->qpd.is_debug)
		max_queues = dev->kfd->device_info.max_no_of_hqd / 2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	if (q_data) {
		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
		*qid = q_data->q_id;
	} else
		retval = find_available_queue_slot(pqm, qid);

	if (retval != 0)
		return retval;

	/* Register process if this is the first queue */
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	/* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */
	if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_kernel_mem(dev->adev,
							AMDGPU_MES_PROC_CTX_SIZE,
							AMDGPU_GEM_DOMAIN_GTT,
							&pdd->proc_ctx_bo,
							&pdd->proc_ctx_gpu_addr,
							&pdd->proc_ctx_cpu_ptr,
							false);
		if (retval) {
			dev_err(dev->adev->dev, "failed to allocate process context bo\n");
			return retval;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
	case KFD_QUEUE_TYPE_SDMA_BY_ENG_ID:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether a SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* check if there is over subscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		    ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		     (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		if ((type == KFD_QUEUE_TYPE_SDMA ||
		     type == KFD_QUEUE_TYPE_SDMA_XGMI ||
		     type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) &&
		    retval == -ENOMEM)
			pr_warn("process pid %d DQM create queue type %d failed. ret %d\n",
				pqm->process->lead_thread->pid, type, retval);
		else
			pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
			       pqm->process->lead_thread->pid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process) {
		/* Return the doorbell offset within the doorbell page
		 * to the caller so it can be passed up to user mode
		 * (in bytes).
		 * relative doorbell index = Absolute doorbell index -
		 * absolute index of first doorbell in the page.
		 */
		uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
								       pdd->qpd.proc_doorbells,
								       0,
								       pdd->dev->kfd->device_info.doorbell_size);

		*p_doorbell_offset_in_process = (q->properties.doorbell_off
						 - first_db_index) * sizeof(uint32_t);
	}

	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq);
	kfree(pqn);
err_allocate_pqn:
	/* If the queues list is empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}

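/*
 * pqm_destroy_queue - destroy queue @qid and release its resources.
 * On -ETIME or -EIO from the device queue manager (e.g. a hung scheduler
 * or a reset in progress) teardown continues anyway so the queue node is
 * not leaked; any other error aborts and leaves the queue in place.
 */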
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_node *dev;
	int retval;

	dqm = NULL;
	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -ENODEV;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq);
	}

	if (pqn->q) {
		retval = kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
		if (retval)
			goto err_destroy_queue;

		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
			       pdd->pasid,
			       pqn->q->properties.queue_id, retval);
			if (retval != -ETIME && retval != -EIO)
				goto err_destroy_queue;
		}
		kfd_procfs_del_queue(pqn->q);
		kfd_queue_release_buffers(pdd, &pqn->q->properties);
		pqm_clean_queue_resource(pqm, pqn);
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}

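/*
 * pqm_update_queue_properties - update ring address, size, percentage,
 * priority and target XCC of queue @qid. An update with a NULL ring
 * address (or zero size) disables the queue; otherwise the new ring
 * buffer is looked up and validated before the old reference is dropped.
 */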
int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid, struct queue_properties *p)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn || !pqn->q) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/*
	 * Update with NULL ring address is used to disable the queue
	 */
	if (p->queue_address && p->queue_size) {
		struct kfd_process_device *pdd;
		struct amdgpu_vm *vm;
		struct queue *q = pqn->q;
		int err;

		pdd = kfd_get_process_device_data(q->device, q->process);
		if (!pdd)
			return -ENODEV;
		vm = drm_priv_to_vm(pdd->drm_priv);
		err = amdgpu_bo_reserve(vm->root.bo, false);
		if (err)
			return err;

		if (kfd_queue_buffer_get(vm, (void *)p->queue_address, &p->ring_bo,
					 p->queue_size +
					 pqn->q->properties.metadata_queue_size)) {
			pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
				 p->queue_address, p->queue_size);
			amdgpu_bo_unreserve(vm->root.bo);
			return -EFAULT;
		}

		kfd_queue_unref_bo_va(vm, &pqn->q->properties.ring_bo);
		kfd_queue_buffer_put(&pqn->q->properties.ring_bo);
		amdgpu_bo_unreserve(vm->root.bo);

		pqn->q->properties.ring_bo = p->ring_bo;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;
	pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
						     pqn->q, NULL);
}

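/*
 * pqm_update_mqd - apply an MQD-level update (e.g. a CU mask) to queue
 * @qid. User CU masks are rejected while the debugger workaround owns the
 * mask, and on WGP-based ASICs (GFX10 and later) CUs may only be enabled
 * in adjacent pairs.
 */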
int pqm_update_mqd(struct process_queue_manager *pqm,
		   unsigned int qid, struct mqd_update_info *minfo)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* CUs are masked for debugger requirements so deny user mask */
	if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
		return -EBUSY;

	/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
	if (minfo && minfo->cu_mask.ptr &&
	    KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
		int i;

		for (i = 0; i < minfo->cu_mask.count; i += 2) {
			uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;

			if (cu_pair && cu_pair != 0x3) {
				pr_debug("CUs must be adjacent pairwise enabled.\n");
				return -EINVAL;
			}
		}
	}

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
						       pqn->q, minfo);
	if (retval != 0)
		return retval;

	if (minfo && minfo->cu_mask.ptr)
		pqn->q->properties.is_user_cu_masked = true;

	return 0;
}

struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
				 unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}

int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}

int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size)
{
	struct process_queue_node *pqn;
	struct kfd_queue_snapshot_entry src;
	uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
	int r = 0;

	*num_qss_entries = 0;
	if (!(*entry_size))
		return -EINVAL;

	*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
	mutex_lock(&pqm->process->event_mutex);

	memset(&src, 0, sizeof(src));

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		if (*num_qss_entries < tmp_qss_entries) {
			set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);

			if (copy_to_user(buf, &src, *entry_size)) {
				r = -EFAULT;
				break;
			}
			buf += tmp_entry_size;
		}
		*num_qss_entries += 1;
	}

	mutex_unlock(&pqm->process->event_mutex);
	return r;
}

static int get_queue_data_sizes(struct kfd_process_device *pdd,
				struct queue *q,
				uint32_t *mqd_size,
				uint32_t *ctl_stack_size)
{
	int ret;

	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
					    q->properties.queue_id,
					    mqd_size,
					    ctl_stack_size);
	if (ret)
		pr_err("Failed to get queue dump info (%d)\n", ret);

	return ret;
}

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes)
{
	uint32_t extra_data_sizes = 0;
	struct queue *q;
	int i;
	int ret;

	*num_queues = 0;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
			    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
				uint32_t mqd_size, ctl_stack_size;

				*num_queues = *num_queues + 1;

				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
				if (ret)
					return ret;

				extra_data_sizes += mqd_size + ctl_stack_size;
			} else {
				pr_err("Unsupported queue type (%d)\n", q->properties.type);
				return -EOPNOTSUPP;
			}
		}
	}
	*priv_data_sizes = extra_data_sizes +
		(*num_queues * sizeof(struct kfd_criu_queue_priv_data));

	return 0;
}

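/* Checkpoint/restore (CRIU) helpers */

/*
 * pqm_checkpoint_mqd - copy the MQD and control stack of queue @qid into
 * caller-provided buffers; used when checkpointing a process with CRIU.
 */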
static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
			      unsigned int qid,
			      void *mqd,
			      void *ctl_stack)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
						       pqn->q, mqd, ctl_stack);
}

static int criu_checkpoint_queue(struct kfd_process_device *pdd,
				 struct queue *q,
				 struct kfd_criu_queue_priv_data *q_data)
{
	uint8_t *mqd, *ctl_stack;
	int ret;

	mqd = (void *)(q_data + 1);
	ctl_stack = mqd + q_data->mqd_size;

	q_data->gpu_id = pdd->user_gpu_id;
	q_data->type = q->properties.type;
	q_data->format = q->properties.format;
	q_data->q_id = q->properties.queue_id;
	q_data->q_address = q->properties.queue_address;
	q_data->q_size = q->properties.queue_size;
	q_data->priority = q->properties.priority;
	q_data->q_percent = q->properties.queue_percent;
	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
	q_data->doorbell_id = q->doorbell_id;

	q_data->sdma_id = q->sdma_id;

	q_data->eop_ring_buffer_address =
		q->properties.eop_ring_buffer_address;

	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;

	q_data->ctx_save_restore_area_address =
		q->properties.ctx_save_restore_area_address;

	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	q_data->gws = !!q->gws;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
		return ret;
	}

	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
	return ret;
}

static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
					 uint8_t __user *user_priv,
					 unsigned int *q_index,
					 uint64_t *queues_priv_data_offset)
{
	unsigned int q_private_data_size = 0;
	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
	struct queue *q;
	int ret = 0;

	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
		struct kfd_criu_queue_priv_data *q_data;
		uint64_t q_data_size;
		uint32_t mqd_size;
		uint32_t ctl_stack_size;

		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
		    q->properties.type != KFD_QUEUE_TYPE_SDMA &&
		    q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {
			pr_err("Unsupported queue type (%d)\n", q->properties.type);
			ret = -EOPNOTSUPP;
			break;
		}

		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
		if (ret)
			break;

		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;

		/* Increase local buffer space if needed */
		if (q_private_data_size < q_data_size) {
			kfree(q_private_data);

			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
			if (!q_private_data) {
				ret = -ENOMEM;
				break;
			}
			q_private_data_size = q_data_size;
		}

		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;

		/*
		 * data stored in this order:
		 * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
		 */
		q_data->mqd_size = mqd_size;
		q_data->ctl_stack_size = ctl_stack_size;

		ret = criu_checkpoint_queue(pdd, q, q_data);
		if (ret)
			break;

		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;

		ret = copy_to_user(user_priv + *queues_priv_data_offset,
				   q_data, q_data_size);
		if (ret) {
			ret = -EFAULT;
			break;
		}
		*queues_priv_data_offset += q_data_size;
		*q_index = *q_index + 1;
	}

	kfree(q_private_data);

	return ret;
}

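/*
 * kfd_criu_checkpoint_queues - checkpoint every queue of process @p across
 * all of its per-device data, serializing the private data into the user
 * buffer at *priv_data_offset and advancing the offset as it goes.
 */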
int kfd_criu_checkpoint_queues(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset)
{
	int ret = 0, pdd_index, q_index = 0;

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];

		/*
		 * criu_checkpoint_queues_device will copy data to user and update q_index and
		 * queues_priv_data_offset
		 */
		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
						    priv_data_offset);
		if (ret)
			break;
	}

	return ret;
}

static void set_queue_properties_from_criu(struct queue_properties *qp,
					   struct kfd_criu_queue_priv_data *q_data,
					   uint32_t num_xcc)
{
	qp->is_interop = false;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
	qp->queue_size = q_data->q_size;
	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
	if (q_data->type == KFD_QUEUE_TYPE_COMPUTE)
		qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc;
	else
		qp->ctl_stack_size = q_data->ctl_stack_size;

	qp->type = q_data->type;
	qp->format = q_data->format;
}

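/*
 * kfd_criu_restore_queue - recreate one checkpointed queue for process @p
 * from the private data blob at *priv_data_offset, validating sizes
 * against max_priv_data_size before copying anything from user space.
 */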
int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
	struct kfd_criu_queue_priv_data *q_data;
	struct kfd_process_device *pdd;
	uint64_t q_extra_data_size;
	struct queue_properties qp;
	unsigned int queue_id;
	int ret = 0;

	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
		return -EINVAL;

	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
	if (!q_data)
		return -ENOMEM;

	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += sizeof(*q_data);
	q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;

	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
	if (!q_extra_data) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += q_extra_data_size;

	pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		ret = -EINVAL;
		goto exit;
	}

	/*
	 * data stored in this order:
	 * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
	 */
	mqd = q_extra_data;
	ctl_stack = mqd + q_data->mqd_size;

	memset(&qp, 0, sizeof(qp));
	set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask));

	print_queue_properties(&qp);

	ret = pqm_create_queue(&p->pqm, pdd->dev, &qp, &queue_id, q_data, mqd, ctl_stack, NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		goto exit;
	}

	if (q_data->gws)
		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
	if (ret)
		pr_err("Failed to restore queue (%d)\n", ret);
	else
		pr_debug("Queue id %d was restored successfully\n", queue_id);

	kfree(q_data);
	kfree(q_extra_data);

	return ret;
}

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  uint32_t *mqd_size,
				  uint32_t *ctl_stack_size)
{
	struct process_queue_node *pqn;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	ret = pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
								 pqn->q, mqd_size,
								 ctl_stack_size);
	if (ret)
		pr_debug("amdkfd: Overflow while computing stack size for queue %d\n", qid);

	return ret;
}

#if defined(CONFIG_DEBUG_FS)

int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0, xcc, num_xccs = 1;
	void *mqd;
	uint64_t size = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		/* Skip nodes without a user queue (e.g. DIQ kernel queues);
		 * q, mqd_mgr and size below are only valid for user queues.
		 */
		if (!pqn->q)
			continue;

		q = pqn->q;
		switch (q->properties.type) {
		case KFD_QUEUE_TYPE_SDMA:
		case KFD_QUEUE_TYPE_SDMA_XGMI:
			seq_printf(m, "  SDMA queue on device %x\n",
				   q->device->id);
			mqd_type = KFD_MQD_TYPE_SDMA;
			/* SDMA MQDs are not replicated per XCC */
			num_xccs = 1;
			break;
		case KFD_QUEUE_TYPE_COMPUTE:
			seq_printf(m, "  Compute queue on device %x\n",
				   q->device->id);
			mqd_type = KFD_MQD_TYPE_CP;
			num_xccs = NUM_XCC(q->device->xcc_mask);
			break;
		default:
			seq_printf(m,
				   "  Queue node with bad user queue type %d on device %x\n",
				   q->properties.type, q->device->id);
			continue;
		}
		mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
		size = mqd_mgr->mqd_stride(mqd_mgr, &q->properties);

		for (xcc = 0; xcc < num_xccs; xcc++) {
			mqd = q->mqd + size * xcc;
			r = mqd_mgr->debugfs_show_mqd(m, mqd);
			if (r != 0)
				break;
		}
	}

	return r;
}

#endif