// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"
#include "kfd_debug.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
	 *    with dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without dqm_lock.
	 *    Save the SDMA count for each node and also add the count to the total
	 *    SDMA count.
	 *    It's possible that, during this step, a few SDMA queue nodes were
	 *    deleted from the qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
	 *    If any node got deleted, its SDMA count would be captured in the SDMA
	 *    past activity counter. So subtract the SDMA counter stored in step 2
	 *    for this node from the total SDMA count.
	 */
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list.
	 * Return the past activity count as the total SDMA count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
	 * count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}

/**
 * kfd_get_cu_occupancy - Collect number of waves in flight on this device
 * by the current process. Translates the acquired wave count into the number
 * of compute units that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count.
 *        The attribute handle encapsulates the GPU device it is associated
 *        with, thereby allowing collection of waves in flight, etc.
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_node *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from the device if it supports it */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
				       &max_waves_per_cu, 0);

	/* Translate wave count to number of compute units */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}

static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter) /
				SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static const struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}
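
/*
 * For illustration: the kobject hierarchy built above and below is exposed
 * through sysfs. Assuming the kfd device class is registered at its usual
 * location (an assumption, the exact mount point depends on the system), the
 * per-process tree created by the functions in this file looks like:
 *
 *   /sys/class/kfd/kfd/proc/<pid>/pasid
 *   /sys/class/kfd/kfd/proc/<pid>/vram_<gpuid>
 *   /sys/class/kfd/kfd/proc/<pid>/sdma_<gpuid>
 *   /sys/class/kfd/kfd/proc/<pid>/stats_<gpuid>/{evicted_ms,cu_occupancy}
 *   /sys/class/kfd/kfd/proc/<pid>/counters_<gpuid>/{faults,page_in,page_out}
 *   /sys/class/kfd/kfd/proc/<pid>/queues/<queue_id>/{size,type,gpuid}
 *
 * so a value can be read from user space with, e.g.:
 *
 *   cat /sys/class/kfd/kfd/proc/<pid>/pasid
 */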

static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}

static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
				struct kfd_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}

static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct kfd_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};
ATTRIBUTE_GROUPS(procfs_queue);

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static const struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_groups = procfs_queue_groups,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static const struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = kfd_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = kfd_sysfs_counters_show,
};

static const struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = kfd_procfs_kobj_release,
};

int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;

	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
				   proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				  char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}

static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "stats_%u", pdd->dev->id);
		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		ret = kobject_init_and_add(pdd->kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);

		if (ret) {
			pr_warn("Creating KFD proc/stats_%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			kfd_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}

static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "counters_%u", pdd->dev->id);
		kobj_counters = kfd_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;

		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}

static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
				      pdd->sdma_filename);
	}
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq",
							 WQ_FREEZABLE);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
				   struct kfd_process_device *pdd, void **kptr)
{
	struct kfd_node *dev = pdd->dev;

	if (kptr && *kptr) {
		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
		*kptr = NULL;
	}

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
					       NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM memory for the KFD process
 * This function should only be called right after the process
 * is created and while kfd_processes_mutex is still being held,
 * to avoid concurrency. Because of that exclusiveness, we do
 * not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, struct kgd_mem **mem, void **kptr)
{
	struct kfd_node *kdev = pdd->dev;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
						      pdd->drm_priv, mem, NULL,
						      flags, false);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
						    pdd->drm_priv);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
				(struct kgd_mem *)*mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto sync_memory_failed;
		}
	}

	return err;

sync_memory_failed:
	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
					       NULL);
err_alloc_mem:
	*mem = NULL;
	*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The memory reserved is for KFD to submit
 * IBs to AMDGPU from kernel space. If the memory is reserved
 * successfully, ib_kaddr will have the CPU/kernel
 * address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &mem, &kaddr);
	if (ret)
		return ret;

	qpd->ib_mem = mem;
	qpd->ib_kaddr = kaddr;

	return 0;
}

static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!qpd->ib_kaddr || !qpd->ib_base)
		return;

	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}

struct kfd_process *kfd_create_process(struct task_struct *thread)
{
	struct kfd_process *process;
	int ret;

	if (!(thread->mm && mmget_not_zero(thread->mm)))
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm) {
		mmput(thread->mm);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Take the kfd_processes_mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	if (kfd_is_locked()) {
		pr_debug("KFD is locked! Cannot create process");
		process = ERR_PTR(-EINVAL);
		goto out;
	}

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread, false);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		/* If the process just called exec(3), it is possible that the
		 * cleanup of the kfd_process (following the release of the mm
		 * of the old process image) is still in the cleanup work queue.
		 * Make sure to drain any such job before trying to recreate any
		 * resource for this process.
		 */
		flush_workqueue(kfd_process_wq);

		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
				      "pasid");

		process->kobj_queues = kobject_create_and_add("queues",
							      process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		kfd_procfs_add_sysfs_stats(process);
		kfd_procfs_add_sysfs_files(process);
		kfd_procfs_add_sysfs_counters(process);

		init_waitqueue_head(&process->wait_irq_drain);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);
	mmput(thread->mm);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread, false);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	if (p && ref)
		kref_get(&p->ref);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

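/*
 * The lookup helpers below return a counted reference. A minimal usage
 * sketch (illustrative only; error handling and locking context depend on
 * the caller):
 *
 *	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
 *
 *	if (p) {
 *		// ... use p while holding the reference ...
 *		kfd_unref_process(p);
 *	}
 *
 * Every successful lookup must be balanced with kfd_unref_process(), which
 * drops the kref and, on the last put, schedules kfd_process_wq_release().
 */
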
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
{
	struct task_struct *task = NULL;
	struct kfd_process *p = NULL;

	if (!pid) {
		task = current;
		get_task_struct(task);
	} else {
		task = get_pid_task(pid, PIDTYPE_PID);
	}

	if (task) {
		p = find_process(task, true);
		put_task_struct(task);
	}

	return p;
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;
	int i;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {

		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *peer_pdd = p->pdds[i];

			if (!peer_pdd->drm_priv)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
						       pdd->drm_priv, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

/*
 * Just kunmap and unpin signal BO here. It will be freed in
 * kfd_process_free_outstanding_kfd_bos()
 */
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	struct kfd_node *kdev;
	void *mem;

	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
	if (!kdev)
		return;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(kdev, p);
	if (!pdd)
		goto out;

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(p->signal_handle));
	if (!mem)
		goto out;

	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);

out:
	mutex_unlock(&p->mutex);
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_device_free_bos(p->pdds[i]);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
			 pdd->dev->id, p->pasid);

		kfd_process_device_destroy_cwsr_dgpu(pdd);
		kfd_process_device_destroy_ib_mem(pdd);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->adev, pdd->drm_priv);
			fput(pdd->drm_file);
		}

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev->kfd, pdd);

		if (pdd->dev->kfd->shared_resources.enable_mes)
			amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
						   pdd->proc_ctx_bo);
		/*
		 * Before destroying pdd, make sure to report availability
		 * for auto suspend
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
		p->pdds[i] = NULL;
	}
	p->n_pdds = 0;
}

static void kfd_process_remove_sysfs(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int i;

	if (!p->kobj)
		return;

	sysfs_remove_file(p->kobj, &p->attr_pasid);

	kobject_del(p->kobj_queues);
	kobject_put(p->kobj_queues);
	p->kobj_queues = NULL;

	for (i = 0; i < p->n_pdds; i++) {
		pdd = p->pdds[i];

		sysfs_remove_file(p->kobj, &pdd->attr_vram);
		sysfs_remove_file(p->kobj, &pdd->attr_sdma);

		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			sysfs_remove_file(pdd->kobj_stats,
					  &pdd->attr_cu_occupancy);
		kobject_del(pdd->kobj_stats);
		kobject_put(pdd->kobj_stats);
		pdd->kobj_stats = NULL;
	}

	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];

		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
		kobject_del(pdd->kobj_counters);
		kobject_put(pdd->kobj_counters);
		pdd->kobj_counters = NULL;
	}

	kobject_del(p->kobj);
	kobject_put(p->kobj);
	p->kobj = NULL;
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);
	struct dma_fence *ef;

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	synchronize_rcu();
	ef = rcu_access_pointer(p->ef);
	dma_fence_signal(ef);

	kfd_process_remove_sysfs(p);

	kfd_process_kunmap_signal_bo(p);
	kfd_process_free_outstanding_kfd_bos(p);
	svm_range_list_fini(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
	int idx = srcu_read_lock(&kfd_processes_srcu);
	struct kfd_process *p = find_process_by_mm(mm);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

static void kfd_process_notifier_release_internal(struct kfd_process *p)
{
	int i;

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		/* Re-enable GFX OFF since runtime enable with ttmp setup disabled it.
		 */
		if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
			amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
	}

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;
	kfd_dbg_trap_disable(p);

	if (atomic_read(&p->debugged_process_count) > 0) {
		struct kfd_process *target;
		unsigned int temp;
		int idx = srcu_read_lock(&kfd_processes_srcu);

		hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
			if (target->debugger_process && target->debugger_process == p) {
				mutex_lock_nested(&target->mutex, 1);
				kfd_dbg_trap_disable(target);
				mutex_unlock(&target->mutex);
				if (atomic_read(&p->debugged_process_count) == 0)
					break;
			}
		}

		srcu_read_unlock(&kfd_processes_srcu, idx);
	}

	mmu_notifier_put(&p->mmu_notifier);
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier srcu is read locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	/*
	 * Do an early return if the table is empty.
	 *
	 * This could potentially happen if this function is called concurrently
	 * by mmu_notifier and by kfd_cleanup_processes.
	 */
	if (hash_empty(kfd_processes_table)) {
		mutex_unlock(&kfd_processes_mutex);
		return;
	}
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	kfd_process_notifier_release_internal(p);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.alloc_notifier = kfd_process_alloc_notifier,
	.free_notifier = kfd_process_free_notifier,
};

/*
 * This code handles the case when the driver is being unloaded before all
 * mm_structs are released. We need to safely free the kfd_process and
 * avoid race conditions with mmu_notifier that might try to free them.
 */
void kfd_cleanup_processes(void)
{
	struct kfd_process *p;
	struct hlist_node *p_temp;
	unsigned int temp;
	HLIST_HEAD(cleanup_list);

	/*
	 * Move all remaining kfd_processes from the process table to a
	 * temp list for processing. Once done, the callback from the
	 * mmu_notifier release will not see the kfd_process in the table
	 * and will do an early return, avoiding double-free issues.
	 */
	mutex_lock(&kfd_processes_mutex);
	hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
		hash_del_rcu(&p->kfd_processes);
		synchronize_srcu(&kfd_processes_srcu);
		hlist_add_head(&p->kfd_processes, &cleanup_list);
	}
	mutex_unlock(&kfd_processes_mutex);

	hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
		kfd_process_notifier_release_internal(p);

	/*
	 * Ensure that all outstanding free_notifier callbacks get called,
	 * triggering the release of the kfd_process struct.
	 */
	mmu_notifier_synchronize();
}

int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	int i;

	if (p->has_cwsr)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_node *dev = p->pdds[i]->dev;
		struct qcm_process_device *qpd = &p->pdds[i]->qpd;

		if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);

		kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	p->has_cwsr = true;

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_mem = mem;
	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);

	kfd_process_set_trap_debug_flag(&pdd->qpd,
					pdd->process->debug_trap_enabled);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
		return;

	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
}

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr)
{
	if (qpd->cwsr_kaddr) {
		/* KFD trap handler is bound, record as second-level TBA/TMA
		 * in first-level TMA. First-level trap will jump to second.
		 */
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		/* No trap handler bound, bind as first-level TBA/TMA. */
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}
}

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
	int i;

	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
	 * boot time retry setting.
	 * Mixing processes with different XNACK/retry settings can hang
	 * the GPU.
	 *
	 * Different GPUs can have different noretry settings depending
	 * on HW bugs or limitations. We need to find at least one
	 * XNACK mode for this process that's compatible with all GPUs.
	 * Fortunately GPUs with retry enabled (noretry=0) can run code
	 * built for XNACK-off. On GFXv9 it may perform slower.
	 *
	 * Therefore applications built for XNACK-off can always be
	 * supported and will be our fallback if any GPU does not
	 * support retry.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_node *dev = p->pdds[i]->dev;

		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
		 * support the SVM APIs and don't need to be considered
		 * for the XNACK mode selection.
		 */
		if (!KFD_IS_SOC15(dev))
			continue;
		/* Aldebaran can always support XNACK because it can support
		 * per-process XNACK mode selection. But let the dev->noretry
		 * setting still influence the default XNACK mode.
		 */
		if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) {
			if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {
				pr_debug("SRIOV platform xnack not supported\n");
				return false;
			}
			continue;
		}

		/* GFXv10 and later GPUs do not support shader preemption
		 * during page faults. This can lead to poor QoS for queue
		 * management and memory-manager-related preemptions or
		 * even deadlocks.
		 */
		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			return false;

		if (dev->kfd->noretry)
			return false;
	}

	return true;
}

void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
				     bool enabled)
{
	if (qpd->cwsr_kaddr) {
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[2] = enabled;
	}
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	struct mmu_notifier *mn;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	process->queues_paused = false;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	err = kfd_event_init_process(process);
	if (err)
		goto err_event_init;
	process->is_32bit_user_mode = in_compat_syscall();
	process->debug_trap_enabled = false;
	process->debugger_process = NULL;
	process->exception_enable_mask = 0;
	atomic_set(&process->debugged_process_count, 0);
	sema_init(&process->runtime_enable_sema, 0);

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0) {
		err = -ENOSPC;
		goto err_alloc_pasid;
	}

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Check XNACK support after PDDs are created in kfd_init_apertures */
	process->xnack_enabled = kfd_process_xnack_mode(process, false);

	err = svm_range_list_init(process);
	if (err)
		goto err_init_svm_range_list;

	/* alloc_notifier needs to find the process in the hash table */
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	/* Keep free_notifier from starting kfd_process_wq_release if
	 * mmu_notifier_get fails because of a pending signal.
	 */
	kref_get(&process->ref);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
	 * dropping the last process reference in the free_notifier.
	 */
	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
	if (IS_ERR(mn)) {
		err = PTR_ERR(mn);
		goto err_register_notifier;
	}
	BUG_ON(mn != &process->mmu_notifier);

	kfd_unref_process(process);
	get_task_struct(process->lead_thread);

	INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);

	return process;

err_register_notifier:
	hash_del_rcu(&process->kfd_processes);
	svm_range_list_fini(process);
err_init_svm_range_list:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfd_event_free_process(process);
err_event_init:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
							struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i]->dev == dev)
			return p->pdds[i];

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;
	int retval = 0;

	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
		return NULL;
	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	pdd->user_gpu_id = dev->id;
	atomic64_set(&pdd->evict_duration_counter, 0);

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_PROC_CTX_SIZE,
						&pdd->proc_ctx_bo,
						&pdd->proc_ctx_gpu_addr,
						&pdd->proc_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate process context bo\n");
			goto err_free_pdd;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	p->pdds[p->n_pdds++] = pdd;
	if (kfd_dbg_is_per_vmid_supported(pdd->dev))
		pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
							pdd->dev->adev,
							false,
							0);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;

err_free_pdd:
	kfree(pdd);
	return NULL;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct amdgpu_fpriv *drv_priv;
	struct amdgpu_vm *avm;
	struct kfd_process *p;
	struct dma_fence *ef;
	struct kfd_node *dev;
	int ret;

	if (!drm_file)
		return -EINVAL;

	if (pdd->drm_priv)
		return -EBUSY;

	ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
	if (ret)
		return ret;
	avm = &drv_priv->vm;

	p = pdd->process;
	dev = pdd->dev;

	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
						     &p->kgd_process_info,
						     &ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}
	RCU_INIT_POINTER(p->ef, ef);
	pdd->drm_priv = drm_file->private_data;

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
	if (ret)
		goto err_set_pasid;

	pdd->drm_file = drm_file;

	return 0;

err_set_pasid:
	kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
	kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
	pdd->drm_priv = NULL;
	amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!pdd->drm_priv)
		return ERR_PTR(-ENODEV);

	/*
	 * Signal the runtime-pm system to auto resume and prevent
	 * further runtime suspend once the device pdd is created, until
	 * the pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
			return ERR_PTR(err);
		}
	}

	/*
	 * Make sure that the runtime_usage counter is incremented just once
	 * per pdd.
	 */
	pdd->runtime_inuse = true;

	return pdd;
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
{
	int r = 0;
	int i;
	unsigned int n_evicted = 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
					     trigger);

		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		/* Eviction returns -EIO if HWS hangs or the ASIC is resetting.
		 * In that case we still want to mark all queues as evicted so
		 * that they are not added back, since they have not actually
		 * been saved right now.
		 */
		if (r && r != -EIO) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (n_evicted == 0)
			break;

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	int r, ret = 0;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
			return i;
	return -EINVAL;
}

int
kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
			    uint32_t *gpuid, uint32_t *gpuidx)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && p->pdds[i]->dev == node) {
			*gpuid = p->pdds[i]->user_gpu_id;
			*gpuidx = i;
			return 0;
		}
	return -EINVAL;
}

static int signal_eviction_fence(struct kfd_process *p)
{
	struct dma_fence *ef;
	int ret;

	rcu_read_lock();
	ef = dma_fence_get_rcu_safe(&p->ef);
	rcu_read_unlock();
	if (!ef)
		return -EINVAL;

	ret = dma_fence_signal(ef);
	dma_fence_put(ef);

	return ret;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
	if (!ret) {
		/* If another thread already signaled the eviction fence,
		 * they are responsible for stopping the queues and scheduling
		 * the restore work.
		 */
		if (signal_eviction_fence(p) ||
		    mod_delayed_work(kfd_restore_wq, &p->restore_work,
				     msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
			kfd_process_restore_queues(p);

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static int restore_process_helper(struct kfd_process *p)
{
	int ret = 0;

	/* VMs may not have been acquired yet during debugging.
static int restore_process_helper(struct kfd_process *p)
{
	int ret = 0;

	/* VMs may not have been acquired yet during debugging. */
	if (p->kgd_process_info) {
		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
			p->kgd_process_info, &p->ef);
		if (ret)
			return ret;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);

	return ret;
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before the restore has actually
	 * succeeded. Otherwise it would have to be set by KGD
	 * (restore_process_bos) before the KFD BOs are unreserved; if not,
	 * the process could be evicted again before the timestamp is set.
	 * If the restore fails, the timestamp is set again on the next
	 * attempt, so the minimum GPU quantum becomes
	 * PROCESS_ACTIVE_TIME_MS minus the time it takes to execute the
	 * following two functions.
	 */

	p->last_restore_timestamp = get_jiffies_64();

	ret = restore_process_helper(p);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
				     msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
			kfd_process_restore_queues(p);
	}
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		signal_eviction_fence(p);
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (restore_process_helper(p)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}
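
/* Illustrative sketch (editorial addition): kfd_suspend_all_processes() and
 * kfd_resume_all_processes() are meant to bracket an operation that makes the
 * GPUs temporarily unusable, such as a system suspend or a reset. The
 * surrounding function and do_hypothetical_maintenance() below are placeholders
 * and only show the expected pairing and error handling.
 *
 *	static int example_bracketed_maintenance(void)
 *	{
 *		int ret;
 *
 *		kfd_suspend_all_processes();	// evict queues, signal fences
 *
 *		ret = do_hypothetical_maintenance();	// placeholder step
 *
 *		// Restore what was evicted even if maintenance failed.
 *		if (kfd_resume_all_processes())
 *			ret = ret ? ret : -EFAULT;
 *		return ret;
 *	}
 */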
int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

/* assumes caller holds process lock. */
int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
{
	uint32_t irq_drain_fence[8];
	uint8_t node_id = 0;
	int r = 0;

	if (!KFD_IS_SOC15(pdd->dev))
		return 0;

	pdd->process->irq_drain_is_open = true;

	memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
	irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
							KFD_IRQ_FENCE_CLIENTID;
	irq_drain_fence[3] = pdd->process->pasid;

	/*
	 * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
	 */
	if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3)) {
		node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
		irq_drain_fence[3] |= node_id << 16;
	}

	/* Send the drain fence so that any stale IRQs already scheduled as
	 * KFD interrupts are flushed before we proceed.
	 */
	if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
						     irq_drain_fence)) {
		pdd->process->irq_drain_is_open = false;
		return 0;
	}

	r = wait_event_interruptible(pdd->process->wait_irq_drain,
				     !READ_ONCE(pdd->process->irq_drain_is_open));
	if (r)
		pdd->process->irq_drain_is_open = false;

	return r;
}

void kfd_process_close_interrupt_drain(unsigned int pasid)
{
	struct kfd_process *p;

	p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return;

	WRITE_ONCE(p->irq_drain_is_open, false);
	wake_up_all(&p->wait_irq_drain);
	kfd_unref_process(p);
}
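
/* Illustrative sketch (editorial addition, not the actual interrupt-handler
 * code): the two functions above form a handshake. kfd_process_drain_interrupts()
 * sends a drain fence and sleeps; when the interrupt handler later sees that
 * fence come back, it is expected to call kfd_process_close_interrupt_drain()
 * with the PASID carried in the fence, which wakes the sleeper. The handler
 * function and the name of its decoded-PASID parameter are hypothetical.
 *
 *	static void example_handle_drain_fence(u32 pasid_from_ih_cookie)
 *	{
 *		// Clears irq_drain_is_open and wakes wait_irq_drain.
 *		kfd_process_close_interrupt_drain(pasid_from_ih_cookie);
 *	}
 */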
struct send_exception_work_handler_workarea {
	struct work_struct work;
	struct kfd_process *p;
	unsigned int queue_id;
	uint64_t error_reason;
};

static void send_exception_work_handler(struct work_struct *work)
{
	struct send_exception_work_handler_workarea *workarea;
	struct kfd_process *p;
	struct queue *q;
	struct mm_struct *mm;
	struct kfd_context_save_area_header __user *csa_header;
	uint64_t __user *err_payload_ptr;
	uint64_t cur_err;
	uint32_t ev_id;

	workarea = container_of(work,
				struct send_exception_work_handler_workarea,
				work);
	p = workarea->p;

	mm = get_task_mm(p->lead_thread);

	if (!mm)
		return;

	kthread_use_mm(mm);

	q = pqm_get_user_queue(&p->pqm, workarea->queue_id);

	if (!q)
		goto out;

	csa_header = (void __user *)q->properties.ctx_save_restore_area_address;

	get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
	get_user(cur_err, err_payload_ptr);
	cur_err |= workarea->error_reason;
	put_user(cur_err, err_payload_ptr);
	get_user(ev_id, &csa_header->err_event_id);

	kfd_set_event(p, ev_id);

out:
	kthread_unuse_mm(mm);
	mmput(mm);
}

int kfd_send_exception_to_runtime(struct kfd_process *p,
				  unsigned int queue_id,
				  uint64_t error_reason)
{
	struct send_exception_work_handler_workarea worker;

	INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);

	worker.p = p;
	worker.queue_id = queue_id;
	worker.error_reason = error_reason;

	schedule_work(&worker.work);
	flush_work(&worker.work);
	destroy_work_on_stack(&worker.work);

	return 0;
}

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p,
							 uint32_t gpu_id)
{
	int i;

	if (gpu_id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *pdd = p->pdds[i];

			if (pdd->user_gpu_id == gpu_id)
				return pdd;
		}
	}
	return NULL;
}

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
{
	int i;

	if (!actual_gpu_id)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (pdd->dev->id == actual_gpu_id)
			return pdd->user_gpu_id;
	}
	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif
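
/* Illustrative sketch (editorial addition): the two ID spaces above are easy
 * to mix up. kfd_process_device_data_by_id() takes the user-visible gpu_id
 * the process bound with, while kfd_process_get_user_gpu_id() translates from
 * the device's actual id (pdd->dev->id) back to that user-visible id. The
 * hypothetical helper below combines them to look up a pdd from an actual id.
 *
 *	static struct kfd_process_device *
 *	example_pdd_from_actual_gpu_id(struct kfd_process *p, uint32_t actual_id)
 *	{
 *		int user_gpu_id = kfd_process_get_user_gpu_id(p, actual_id);
 *
 *		if (user_gpu_id < 0)
 *			return NULL;	// process is not bound to this GPU
 *		return kfd_process_device_data_by_id(p, user_gpu_id);
 *	}
 */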