1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /* 3 * Copyright 2014-2022 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include <linux/mutex.h> 25 #include <linux/log2.h> 26 #include <linux/sched.h> 27 #include <linux/sched/mm.h> 28 #include <linux/sched/task.h> 29 #include <linux/mmu_context.h> 30 #include <linux/slab.h> 31 #include <linux/notifier.h> 32 #include <linux/compat.h> 33 #include <linux/mman.h> 34 #include <linux/file.h> 35 #include <linux/pm_runtime.h> 36 #include "amdgpu_amdkfd.h" 37 #include "amdgpu.h" 38 39 struct mm_struct; 40 41 #include "kfd_priv.h" 42 #include "kfd_device_queue_manager.h" 43 #include "kfd_svm.h" 44 #include "kfd_smi_events.h" 45 #include "kfd_debug.h" 46 47 /* 48 * List of struct kfd_process (field kfd_process). 49 * Unique/indexed by mm_struct* 50 */ 51 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); 52 DEFINE_MUTEX(kfd_processes_mutex); 53 54 DEFINE_SRCU(kfd_processes_srcu); 55 56 /* For process termination handling */ 57 static struct workqueue_struct *kfd_process_wq; 58 59 /* Ordered, single-threaded workqueue for restoring evicted 60 * processes. Restoring multiple processes concurrently under memory 61 * pressure can lead to processes blocking each other from validating 62 * their BOs and result in a live-lock situation where processes 63 * remain evicted indefinitely. 64 */ 65 static struct workqueue_struct *kfd_restore_wq; 66 67 static struct kfd_process *find_process(const struct task_struct *thread, 68 bool ref); 69 static void kfd_process_ref_release(struct kref *ref); 70 static struct kfd_process *create_process(const struct task_struct *thread); 71 72 static void evict_process_worker(struct work_struct *work); 73 static void restore_process_worker(struct work_struct *work); 74 75 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd); 76 77 struct kfd_procfs_tree { 78 struct kobject *kobj; 79 }; 80 81 static struct kfd_procfs_tree procfs; 82 83 /* 84 * Structure for SDMA activity tracking 85 */ 86 struct kfd_sdma_activity_handler_workarea { 87 struct work_struct sdma_activity_work; 88 struct kfd_process_device *pdd; 89 uint64_t sdma_activity_counter; 90 }; 91 92 struct temp_sdma_queue_list { 93 uint64_t __user *rptr; 94 uint64_t sdma_val; 95 unsigned int queue_id; 96 struct list_head list; 97 }; 98 99 static void kfd_sdma_activity_worker(struct work_struct *work) 100 { 101 struct kfd_sdma_activity_handler_workarea *workarea; 102 struct kfd_process_device *pdd; 103 uint64_t val; 104 struct mm_struct *mm; 105 struct queue *q; 106 struct qcm_process_device *qpd; 107 struct device_queue_manager *dqm; 108 int ret = 0; 109 struct temp_sdma_queue_list sdma_q_list; 110 struct temp_sdma_queue_list *sdma_q, *next; 111 112 workarea = container_of(work, struct kfd_sdma_activity_handler_workarea, 113 sdma_activity_work); 114 115 pdd = workarea->pdd; 116 if (!pdd) 117 return; 118 dqm = pdd->dev->dqm; 119 qpd = &pdd->qpd; 120 if (!dqm || !qpd) 121 return; 122 /* 123 * Total SDMA activity is current SDMA activity + past SDMA activity 124 * Past SDMA count is stored in pdd. 125 * To get the current activity counters for all active SDMA queues, 126 * we loop over all SDMA queues and get their counts from user-space. 127 * 128 * We cannot call get_user() with dqm_lock held as it can cause 129 * a circular lock dependency situation. To read the SDMA stats, 130 * we need to do the following: 131 * 132 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list, 133 * with dqm_lock/dqm_unlock(). 134 * 2. Call get_user() for each node in temporary list without dqm_lock. 135 * Save the SDMA count for each node and also add the count to the total 136 * SDMA count counter. 137 * Its possible, during this step, a few SDMA queue nodes got deleted 138 * from the qpd->queues_list. 139 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted. 140 * If any node got deleted, its SDMA count would be captured in the sdma 141 * past activity counter. So subtract the SDMA counter stored in step 2 142 * for this node from the total SDMA count. 143 */ 144 INIT_LIST_HEAD(&sdma_q_list.list); 145 146 /* 147 * Create the temp list of all SDMA queues 148 */ 149 dqm_lock(dqm); 150 151 list_for_each_entry(q, &qpd->queues_list, list) { 152 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) && 153 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI)) 154 continue; 155 156 sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL); 157 if (!sdma_q) { 158 dqm_unlock(dqm); 159 goto cleanup; 160 } 161 162 INIT_LIST_HEAD(&sdma_q->list); 163 sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr; 164 sdma_q->queue_id = q->properties.queue_id; 165 list_add_tail(&sdma_q->list, &sdma_q_list.list); 166 } 167 168 /* 169 * If the temp list is empty, then no SDMA queues nodes were found in 170 * qpd->queues_list. Return the past activity count as the total sdma 171 * count 172 */ 173 if (list_empty(&sdma_q_list.list)) { 174 workarea->sdma_activity_counter = pdd->sdma_past_activity_counter; 175 dqm_unlock(dqm); 176 return; 177 } 178 179 dqm_unlock(dqm); 180 181 /* 182 * Get the usage count for each SDMA queue in temp_list. 183 */ 184 mm = get_task_mm(pdd->process->lead_thread); 185 if (!mm) 186 goto cleanup; 187 188 kthread_use_mm(mm); 189 190 list_for_each_entry(sdma_q, &sdma_q_list.list, list) { 191 val = 0; 192 ret = read_sdma_queue_counter(sdma_q->rptr, &val); 193 if (ret) { 194 pr_debug("Failed to read SDMA queue active counter for queue id: %d", 195 sdma_q->queue_id); 196 } else { 197 sdma_q->sdma_val = val; 198 workarea->sdma_activity_counter += val; 199 } 200 } 201 202 kthread_unuse_mm(mm); 203 mmput(mm); 204 205 /* 206 * Do a second iteration over qpd_queues_list to check if any SDMA 207 * nodes got deleted while fetching SDMA counter. 208 */ 209 dqm_lock(dqm); 210 211 workarea->sdma_activity_counter += pdd->sdma_past_activity_counter; 212 213 list_for_each_entry(q, &qpd->queues_list, list) { 214 if (list_empty(&sdma_q_list.list)) 215 break; 216 217 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) && 218 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI)) 219 continue; 220 221 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) { 222 if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) && 223 (sdma_q->queue_id == q->properties.queue_id)) { 224 list_del(&sdma_q->list); 225 kfree(sdma_q); 226 break; 227 } 228 } 229 } 230 231 dqm_unlock(dqm); 232 233 /* 234 * If temp list is not empty, it implies some queues got deleted 235 * from qpd->queues_list during SDMA usage read. Subtract the SDMA 236 * count for each node from the total SDMA count. 237 */ 238 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) { 239 workarea->sdma_activity_counter -= sdma_q->sdma_val; 240 list_del(&sdma_q->list); 241 kfree(sdma_q); 242 } 243 244 return; 245 246 cleanup: 247 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) { 248 list_del(&sdma_q->list); 249 kfree(sdma_q); 250 } 251 } 252 253 /** 254 * kfd_get_cu_occupancy - Collect number of waves in-flight on this device 255 * by current process. Translates acquired wave count into number of compute units 256 * that are occupied. 257 * 258 * @attr: Handle of attribute that allows reporting of wave count. The attribute 259 * handle encapsulates GPU device it is associated with, thereby allowing collection 260 * of waves in flight, etc 261 * @buffer: Handle of user provided buffer updated with wave count 262 * 263 * Return: Number of bytes written to user buffer or an error value 264 */ 265 static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) 266 { 267 int cu_cnt; 268 int wave_cnt; 269 int max_waves_per_cu; 270 struct kfd_node *dev = NULL; 271 struct kfd_process *proc = NULL; 272 struct kfd_process_device *pdd = NULL; 273 int i; 274 struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES]; 275 u32 queue_format; 276 277 memset(cu_occupancy, 0x0, sizeof(cu_occupancy)); 278 279 pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy); 280 dev = pdd->dev; 281 if (dev->kfd2kgd->get_cu_occupancy == NULL) 282 return -EINVAL; 283 284 cu_cnt = 0; 285 proc = pdd->process; 286 if (pdd->qpd.queue_count == 0) { 287 pr_debug("Gpu-Id: %d has no active queues for process %d\n", 288 dev->id, proc->pasid); 289 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt); 290 } 291 292 /* Collect wave count from device if it supports */ 293 wave_cnt = 0; 294 max_waves_per_cu = 0; 295 296 /* 297 * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition. 298 * For AQL queues, because of cooperative dispatch we multiply the wave count 299 * by number of XCCs in the partition to get the total wave counts across all 300 * XCCs in the partition. 301 * For PM4 queues, there is no cooperative dispatch so wave_cnt stay as it is. 302 */ 303 dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy, 304 &max_waves_per_cu, ffs(dev->xcc_mask) - 1); 305 306 for (i = 0; i < AMDGPU_MAX_QUEUES; i++) { 307 if (cu_occupancy[i].wave_cnt != 0 && 308 kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd, 309 cu_occupancy[i].doorbell_off, 310 &queue_format)) { 311 if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4)) 312 wave_cnt += cu_occupancy[i].wave_cnt; 313 else 314 wave_cnt += (NUM_XCC(dev->xcc_mask) * 315 cu_occupancy[i].wave_cnt); 316 } 317 } 318 319 /* Translate wave count to number of compute units */ 320 cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu; 321 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt); 322 } 323 324 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, 325 char *buffer) 326 { 327 if (strcmp(attr->name, "pasid") == 0) { 328 struct kfd_process *p = container_of(attr, struct kfd_process, 329 attr_pasid); 330 331 return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid); 332 } else if (strncmp(attr->name, "vram_", 5) == 0) { 333 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, 334 attr_vram); 335 return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage)); 336 } else if (strncmp(attr->name, "sdma_", 5) == 0) { 337 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, 338 attr_sdma); 339 struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler; 340 341 INIT_WORK(&sdma_activity_work_handler.sdma_activity_work, 342 kfd_sdma_activity_worker); 343 344 sdma_activity_work_handler.pdd = pdd; 345 sdma_activity_work_handler.sdma_activity_counter = 0; 346 347 schedule_work(&sdma_activity_work_handler.sdma_activity_work); 348 349 flush_work(&sdma_activity_work_handler.sdma_activity_work); 350 351 return snprintf(buffer, PAGE_SIZE, "%llu\n", 352 (sdma_activity_work_handler.sdma_activity_counter)/ 353 SDMA_ACTIVITY_DIVISOR); 354 } else { 355 pr_err("Invalid attribute"); 356 return -EINVAL; 357 } 358 359 return 0; 360 } 361 362 static void kfd_procfs_kobj_release(struct kobject *kobj) 363 { 364 kfree(kobj); 365 } 366 367 static const struct sysfs_ops kfd_procfs_ops = { 368 .show = kfd_procfs_show, 369 }; 370 371 static const struct kobj_type procfs_type = { 372 .release = kfd_procfs_kobj_release, 373 .sysfs_ops = &kfd_procfs_ops, 374 }; 375 376 void kfd_procfs_init(void) 377 { 378 int ret = 0; 379 380 procfs.kobj = kfd_alloc_struct(procfs.kobj); 381 if (!procfs.kobj) 382 return; 383 384 ret = kobject_init_and_add(procfs.kobj, &procfs_type, 385 &kfd_device->kobj, "proc"); 386 if (ret) { 387 pr_warn("Could not create procfs proc folder"); 388 /* If we fail to create the procfs, clean up */ 389 kfd_procfs_shutdown(); 390 } 391 } 392 393 void kfd_procfs_shutdown(void) 394 { 395 if (procfs.kobj) { 396 kobject_del(procfs.kobj); 397 kobject_put(procfs.kobj); 398 procfs.kobj = NULL; 399 } 400 } 401 402 static ssize_t kfd_procfs_queue_show(struct kobject *kobj, 403 struct attribute *attr, char *buffer) 404 { 405 struct queue *q = container_of(kobj, struct queue, kobj); 406 407 if (!strcmp(attr->name, "size")) 408 return snprintf(buffer, PAGE_SIZE, "%llu", 409 q->properties.queue_size); 410 else if (!strcmp(attr->name, "type")) 411 return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type); 412 else if (!strcmp(attr->name, "gpuid")) 413 return snprintf(buffer, PAGE_SIZE, "%u", q->device->id); 414 else 415 pr_err("Invalid attribute"); 416 417 return 0; 418 } 419 420 static ssize_t kfd_procfs_stats_show(struct kobject *kobj, 421 struct attribute *attr, char *buffer) 422 { 423 if (strcmp(attr->name, "evicted_ms") == 0) { 424 struct kfd_process_device *pdd = container_of(attr, 425 struct kfd_process_device, 426 attr_evict); 427 uint64_t evict_jiffies; 428 429 evict_jiffies = atomic64_read(&pdd->evict_duration_counter); 430 431 return snprintf(buffer, 432 PAGE_SIZE, 433 "%llu\n", 434 jiffies64_to_msecs(evict_jiffies)); 435 436 /* Sysfs handle that gets CU occupancy is per device */ 437 } else if (strcmp(attr->name, "cu_occupancy") == 0) { 438 return kfd_get_cu_occupancy(attr, buffer); 439 } else { 440 pr_err("Invalid attribute"); 441 } 442 443 return 0; 444 } 445 446 static ssize_t kfd_sysfs_counters_show(struct kobject *kobj, 447 struct attribute *attr, char *buf) 448 { 449 struct kfd_process_device *pdd; 450 451 if (!strcmp(attr->name, "faults")) { 452 pdd = container_of(attr, struct kfd_process_device, 453 attr_faults); 454 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults)); 455 } 456 if (!strcmp(attr->name, "page_in")) { 457 pdd = container_of(attr, struct kfd_process_device, 458 attr_page_in); 459 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in)); 460 } 461 if (!strcmp(attr->name, "page_out")) { 462 pdd = container_of(attr, struct kfd_process_device, 463 attr_page_out); 464 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out)); 465 } 466 return 0; 467 } 468 469 static struct attribute attr_queue_size = { 470 .name = "size", 471 .mode = KFD_SYSFS_FILE_MODE 472 }; 473 474 static struct attribute attr_queue_type = { 475 .name = "type", 476 .mode = KFD_SYSFS_FILE_MODE 477 }; 478 479 static struct attribute attr_queue_gpuid = { 480 .name = "gpuid", 481 .mode = KFD_SYSFS_FILE_MODE 482 }; 483 484 static struct attribute *procfs_queue_attrs[] = { 485 &attr_queue_size, 486 &attr_queue_type, 487 &attr_queue_gpuid, 488 NULL 489 }; 490 ATTRIBUTE_GROUPS(procfs_queue); 491 492 static const struct sysfs_ops procfs_queue_ops = { 493 .show = kfd_procfs_queue_show, 494 }; 495 496 static const struct kobj_type procfs_queue_type = { 497 .sysfs_ops = &procfs_queue_ops, 498 .default_groups = procfs_queue_groups, 499 }; 500 501 static const struct sysfs_ops procfs_stats_ops = { 502 .show = kfd_procfs_stats_show, 503 }; 504 505 static const struct kobj_type procfs_stats_type = { 506 .sysfs_ops = &procfs_stats_ops, 507 .release = kfd_procfs_kobj_release, 508 }; 509 510 static const struct sysfs_ops sysfs_counters_ops = { 511 .show = kfd_sysfs_counters_show, 512 }; 513 514 static const struct kobj_type sysfs_counters_type = { 515 .sysfs_ops = &sysfs_counters_ops, 516 .release = kfd_procfs_kobj_release, 517 }; 518 519 int kfd_procfs_add_queue(struct queue *q) 520 { 521 struct kfd_process *proc; 522 int ret; 523 524 if (!q || !q->process) 525 return -EINVAL; 526 proc = q->process; 527 528 /* Create proc/<pid>/queues/<queue id> folder */ 529 if (!proc->kobj_queues) 530 return -EFAULT; 531 ret = kobject_init_and_add(&q->kobj, &procfs_queue_type, 532 proc->kobj_queues, "%u", q->properties.queue_id); 533 if (ret < 0) { 534 pr_warn("Creating proc/<pid>/queues/%u failed", 535 q->properties.queue_id); 536 kobject_put(&q->kobj); 537 return ret; 538 } 539 540 return 0; 541 } 542 543 static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr, 544 char *name) 545 { 546 int ret; 547 548 if (!kobj || !attr || !name) 549 return; 550 551 attr->name = name; 552 attr->mode = KFD_SYSFS_FILE_MODE; 553 sysfs_attr_init(attr); 554 555 ret = sysfs_create_file(kobj, attr); 556 if (ret) 557 pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret); 558 } 559 560 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p) 561 { 562 int ret; 563 int i; 564 char stats_dir_filename[MAX_SYSFS_FILENAME_LEN]; 565 566 if (!p || !p->kobj) 567 return; 568 569 /* 570 * Create sysfs files for each GPU: 571 * - proc/<pid>/stats_<gpuid>/ 572 * - proc/<pid>/stats_<gpuid>/evicted_ms 573 * - proc/<pid>/stats_<gpuid>/cu_occupancy 574 */ 575 for (i = 0; i < p->n_pdds; i++) { 576 struct kfd_process_device *pdd = p->pdds[i]; 577 578 snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN, 579 "stats_%u", pdd->dev->id); 580 pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats); 581 if (!pdd->kobj_stats) 582 return; 583 584 ret = kobject_init_and_add(pdd->kobj_stats, 585 &procfs_stats_type, 586 p->kobj, 587 stats_dir_filename); 588 589 if (ret) { 590 pr_warn("Creating KFD proc/stats_%s folder failed", 591 stats_dir_filename); 592 kobject_put(pdd->kobj_stats); 593 pdd->kobj_stats = NULL; 594 return; 595 } 596 597 kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict, 598 "evicted_ms"); 599 /* Add sysfs file to report compute unit occupancy */ 600 if (pdd->dev->kfd2kgd->get_cu_occupancy) 601 kfd_sysfs_create_file(pdd->kobj_stats, 602 &pdd->attr_cu_occupancy, 603 "cu_occupancy"); 604 } 605 } 606 607 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p) 608 { 609 int ret = 0; 610 int i; 611 char counters_dir_filename[MAX_SYSFS_FILENAME_LEN]; 612 613 if (!p || !p->kobj) 614 return; 615 616 /* 617 * Create sysfs files for each GPU which supports SVM 618 * - proc/<pid>/counters_<gpuid>/ 619 * - proc/<pid>/counters_<gpuid>/faults 620 * - proc/<pid>/counters_<gpuid>/page_in 621 * - proc/<pid>/counters_<gpuid>/page_out 622 */ 623 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) { 624 struct kfd_process_device *pdd = p->pdds[i]; 625 struct kobject *kobj_counters; 626 627 snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN, 628 "counters_%u", pdd->dev->id); 629 kobj_counters = kfd_alloc_struct(kobj_counters); 630 if (!kobj_counters) 631 return; 632 633 ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type, 634 p->kobj, counters_dir_filename); 635 if (ret) { 636 pr_warn("Creating KFD proc/%s folder failed", 637 counters_dir_filename); 638 kobject_put(kobj_counters); 639 return; 640 } 641 642 pdd->kobj_counters = kobj_counters; 643 kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults, 644 "faults"); 645 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in, 646 "page_in"); 647 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out, 648 "page_out"); 649 } 650 } 651 652 static void kfd_procfs_add_sysfs_files(struct kfd_process *p) 653 { 654 int i; 655 656 if (!p || !p->kobj) 657 return; 658 659 /* 660 * Create sysfs files for each GPU: 661 * - proc/<pid>/vram_<gpuid> 662 * - proc/<pid>/sdma_<gpuid> 663 */ 664 for (i = 0; i < p->n_pdds; i++) { 665 struct kfd_process_device *pdd = p->pdds[i]; 666 667 snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u", 668 pdd->dev->id); 669 kfd_sysfs_create_file(p->kobj, &pdd->attr_vram, 670 pdd->vram_filename); 671 672 snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u", 673 pdd->dev->id); 674 kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma, 675 pdd->sdma_filename); 676 } 677 } 678 679 void kfd_procfs_del_queue(struct queue *q) 680 { 681 if (!q) 682 return; 683 684 kobject_del(&q->kobj); 685 kobject_put(&q->kobj); 686 } 687 688 int kfd_process_create_wq(void) 689 { 690 if (!kfd_process_wq) 691 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0); 692 if (!kfd_restore_wq) 693 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 694 WQ_FREEZABLE); 695 696 if (!kfd_process_wq || !kfd_restore_wq) { 697 kfd_process_destroy_wq(); 698 return -ENOMEM; 699 } 700 701 return 0; 702 } 703 704 void kfd_process_destroy_wq(void) 705 { 706 if (kfd_process_wq) { 707 destroy_workqueue(kfd_process_wq); 708 kfd_process_wq = NULL; 709 } 710 if (kfd_restore_wq) { 711 destroy_workqueue(kfd_restore_wq); 712 kfd_restore_wq = NULL; 713 } 714 } 715 716 static void kfd_process_free_gpuvm(struct kgd_mem *mem, 717 struct kfd_process_device *pdd, void **kptr) 718 { 719 struct kfd_node *dev = pdd->dev; 720 721 if (kptr && *kptr) { 722 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem); 723 *kptr = NULL; 724 } 725 726 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv); 727 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv, 728 NULL); 729 } 730 731 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process 732 * This function should be only called right after the process 733 * is created and when kfd_processes_mutex is still being held 734 * to avoid concurrency. Because of that exclusiveness, we do 735 * not need to take p->mutex. 736 */ 737 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, 738 uint64_t gpu_va, uint32_t size, 739 uint32_t flags, struct kgd_mem **mem, void **kptr) 740 { 741 struct kfd_node *kdev = pdd->dev; 742 int err; 743 744 err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size, 745 pdd->drm_priv, mem, NULL, 746 flags, false); 747 if (err) 748 goto err_alloc_mem; 749 750 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem, 751 pdd->drm_priv); 752 if (err) 753 goto err_map_mem; 754 755 err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true); 756 if (err) { 757 pr_debug("Sync memory failed, wait interrupted by user signal\n"); 758 goto sync_memory_failed; 759 } 760 761 if (kptr) { 762 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel( 763 (struct kgd_mem *)*mem, kptr, NULL); 764 if (err) { 765 pr_debug("Map GTT BO to kernel failed\n"); 766 goto sync_memory_failed; 767 } 768 } 769 770 return err; 771 772 sync_memory_failed: 773 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv); 774 775 err_map_mem: 776 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv, 777 NULL); 778 err_alloc_mem: 779 *mem = NULL; 780 *kptr = NULL; 781 return err; 782 } 783 784 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the 785 * process for IB usage The memory reserved is for KFD to submit 786 * IB to AMDGPU from kernel. If the memory is reserved 787 * successfully, ib_kaddr will have the CPU/kernel 788 * address. Check ib_kaddr before accessing the memory. 789 */ 790 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) 791 { 792 struct qcm_process_device *qpd = &pdd->qpd; 793 uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT | 794 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE | 795 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | 796 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; 797 struct kgd_mem *mem; 798 void *kaddr; 799 int ret; 800 801 if (qpd->ib_kaddr || !qpd->ib_base) 802 return 0; 803 804 /* ib_base is only set for dGPU */ 805 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags, 806 &mem, &kaddr); 807 if (ret) 808 return ret; 809 810 qpd->ib_mem = mem; 811 qpd->ib_kaddr = kaddr; 812 813 return 0; 814 } 815 816 static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd) 817 { 818 struct qcm_process_device *qpd = &pdd->qpd; 819 820 if (!qpd->ib_kaddr || !qpd->ib_base) 821 return; 822 823 kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr); 824 } 825 826 struct kfd_process *kfd_create_process(struct task_struct *thread) 827 { 828 struct kfd_process *process; 829 int ret; 830 831 if (!(thread->mm && mmget_not_zero(thread->mm))) 832 return ERR_PTR(-EINVAL); 833 834 /* Only the pthreads threading model is supported. */ 835 if (thread->group_leader->mm != thread->mm) { 836 mmput(thread->mm); 837 return ERR_PTR(-EINVAL); 838 } 839 840 /* 841 * take kfd processes mutex before starting of process creation 842 * so there won't be a case where two threads of the same process 843 * create two kfd_process structures 844 */ 845 mutex_lock(&kfd_processes_mutex); 846 847 if (kfd_is_locked()) { 848 pr_debug("KFD is locked! Cannot create process"); 849 process = ERR_PTR(-EINVAL); 850 goto out; 851 } 852 853 /* A prior open of /dev/kfd could have already created the process. */ 854 process = find_process(thread, false); 855 if (process) { 856 pr_debug("Process already found\n"); 857 } else { 858 /* If the process just called exec(3), it is possible that the 859 * cleanup of the kfd_process (following the release of the mm 860 * of the old process image) is still in the cleanup work queue. 861 * Make sure to drain any job before trying to recreate any 862 * resource for this process. 863 */ 864 flush_workqueue(kfd_process_wq); 865 866 process = create_process(thread); 867 if (IS_ERR(process)) 868 goto out; 869 870 if (!procfs.kobj) 871 goto out; 872 873 process->kobj = kfd_alloc_struct(process->kobj); 874 if (!process->kobj) { 875 pr_warn("Creating procfs kobject failed"); 876 goto out; 877 } 878 ret = kobject_init_and_add(process->kobj, &procfs_type, 879 procfs.kobj, "%d", 880 (int)process->lead_thread->pid); 881 if (ret) { 882 pr_warn("Creating procfs pid directory failed"); 883 kobject_put(process->kobj); 884 goto out; 885 } 886 887 kfd_sysfs_create_file(process->kobj, &process->attr_pasid, 888 "pasid"); 889 890 process->kobj_queues = kobject_create_and_add("queues", 891 process->kobj); 892 if (!process->kobj_queues) 893 pr_warn("Creating KFD proc/queues folder failed"); 894 895 kfd_procfs_add_sysfs_stats(process); 896 kfd_procfs_add_sysfs_files(process); 897 kfd_procfs_add_sysfs_counters(process); 898 899 init_waitqueue_head(&process->wait_irq_drain); 900 } 901 out: 902 if (!IS_ERR(process)) 903 kref_get(&process->ref); 904 mutex_unlock(&kfd_processes_mutex); 905 mmput(thread->mm); 906 907 return process; 908 } 909 910 struct kfd_process *kfd_get_process(const struct task_struct *thread) 911 { 912 struct kfd_process *process; 913 914 if (!thread->mm) 915 return ERR_PTR(-EINVAL); 916 917 /* Only the pthreads threading model is supported. */ 918 if (thread->group_leader->mm != thread->mm) 919 return ERR_PTR(-EINVAL); 920 921 process = find_process(thread, false); 922 if (!process) 923 return ERR_PTR(-EINVAL); 924 925 return process; 926 } 927 928 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm) 929 { 930 struct kfd_process *process; 931 932 hash_for_each_possible_rcu(kfd_processes_table, process, 933 kfd_processes, (uintptr_t)mm) 934 if (process->mm == mm) 935 return process; 936 937 return NULL; 938 } 939 940 static struct kfd_process *find_process(const struct task_struct *thread, 941 bool ref) 942 { 943 struct kfd_process *p; 944 int idx; 945 946 idx = srcu_read_lock(&kfd_processes_srcu); 947 p = find_process_by_mm(thread->mm); 948 if (p && ref) 949 kref_get(&p->ref); 950 srcu_read_unlock(&kfd_processes_srcu, idx); 951 952 return p; 953 } 954 955 void kfd_unref_process(struct kfd_process *p) 956 { 957 kref_put(&p->ref, kfd_process_ref_release); 958 } 959 960 /* This increments the process->ref counter. */ 961 struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid) 962 { 963 struct task_struct *task = NULL; 964 struct kfd_process *p = NULL; 965 966 if (!pid) { 967 task = current; 968 get_task_struct(task); 969 } else { 970 task = get_pid_task(pid, PIDTYPE_PID); 971 } 972 973 if (task) { 974 p = find_process(task, true); 975 put_task_struct(task); 976 } 977 978 return p; 979 } 980 981 static void kfd_process_device_free_bos(struct kfd_process_device *pdd) 982 { 983 struct kfd_process *p = pdd->process; 984 void *mem; 985 int id; 986 int i; 987 988 /* 989 * Remove all handles from idr and release appropriate 990 * local memory object 991 */ 992 idr_for_each_entry(&pdd->alloc_idr, mem, id) { 993 994 for (i = 0; i < p->n_pdds; i++) { 995 struct kfd_process_device *peer_pdd = p->pdds[i]; 996 997 if (!peer_pdd->drm_priv) 998 continue; 999 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( 1000 peer_pdd->dev->adev, mem, peer_pdd->drm_priv); 1001 } 1002 1003 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem, 1004 pdd->drm_priv, NULL); 1005 kfd_process_device_remove_obj_handle(pdd, id); 1006 } 1007 } 1008 1009 /* 1010 * Just kunmap and unpin signal BO here. It will be freed in 1011 * kfd_process_free_outstanding_kfd_bos() 1012 */ 1013 static void kfd_process_kunmap_signal_bo(struct kfd_process *p) 1014 { 1015 struct kfd_process_device *pdd; 1016 struct kfd_node *kdev; 1017 void *mem; 1018 1019 kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle)); 1020 if (!kdev) 1021 return; 1022 1023 mutex_lock(&p->mutex); 1024 1025 pdd = kfd_get_process_device_data(kdev, p); 1026 if (!pdd) 1027 goto out; 1028 1029 mem = kfd_process_device_translate_handle( 1030 pdd, GET_IDR_HANDLE(p->signal_handle)); 1031 if (!mem) 1032 goto out; 1033 1034 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem); 1035 1036 out: 1037 mutex_unlock(&p->mutex); 1038 } 1039 1040 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p) 1041 { 1042 int i; 1043 1044 for (i = 0; i < p->n_pdds; i++) 1045 kfd_process_device_free_bos(p->pdds[i]); 1046 } 1047 1048 static void kfd_process_destroy_pdds(struct kfd_process *p) 1049 { 1050 int i; 1051 1052 for (i = 0; i < p->n_pdds; i++) { 1053 struct kfd_process_device *pdd = p->pdds[i]; 1054 1055 pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n", 1056 pdd->dev->id, p->pasid); 1057 1058 kfd_process_device_destroy_cwsr_dgpu(pdd); 1059 kfd_process_device_destroy_ib_mem(pdd); 1060 1061 if (pdd->drm_file) { 1062 amdgpu_amdkfd_gpuvm_release_process_vm( 1063 pdd->dev->adev, pdd->drm_priv); 1064 fput(pdd->drm_file); 1065 } 1066 1067 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) 1068 free_pages((unsigned long)pdd->qpd.cwsr_kaddr, 1069 get_order(KFD_CWSR_TBA_TMA_SIZE)); 1070 1071 idr_destroy(&pdd->alloc_idr); 1072 1073 kfd_free_process_doorbells(pdd->dev->kfd, pdd); 1074 1075 if (pdd->dev->kfd->shared_resources.enable_mes) 1076 amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, 1077 &pdd->proc_ctx_bo); 1078 /* 1079 * before destroying pdd, make sure to report availability 1080 * for auto suspend 1081 */ 1082 if (pdd->runtime_inuse) { 1083 pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev); 1084 pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev); 1085 pdd->runtime_inuse = false; 1086 } 1087 1088 kfree(pdd); 1089 p->pdds[i] = NULL; 1090 } 1091 p->n_pdds = 0; 1092 } 1093 1094 static void kfd_process_remove_sysfs(struct kfd_process *p) 1095 { 1096 struct kfd_process_device *pdd; 1097 int i; 1098 1099 if (!p->kobj) 1100 return; 1101 1102 sysfs_remove_file(p->kobj, &p->attr_pasid); 1103 kobject_del(p->kobj_queues); 1104 kobject_put(p->kobj_queues); 1105 p->kobj_queues = NULL; 1106 1107 for (i = 0; i < p->n_pdds; i++) { 1108 pdd = p->pdds[i]; 1109 1110 sysfs_remove_file(p->kobj, &pdd->attr_vram); 1111 sysfs_remove_file(p->kobj, &pdd->attr_sdma); 1112 1113 sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict); 1114 if (pdd->dev->kfd2kgd->get_cu_occupancy) 1115 sysfs_remove_file(pdd->kobj_stats, 1116 &pdd->attr_cu_occupancy); 1117 kobject_del(pdd->kobj_stats); 1118 kobject_put(pdd->kobj_stats); 1119 pdd->kobj_stats = NULL; 1120 } 1121 1122 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) { 1123 pdd = p->pdds[i]; 1124 1125 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults); 1126 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in); 1127 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out); 1128 kobject_del(pdd->kobj_counters); 1129 kobject_put(pdd->kobj_counters); 1130 pdd->kobj_counters = NULL; 1131 } 1132 1133 kobject_del(p->kobj); 1134 kobject_put(p->kobj); 1135 p->kobj = NULL; 1136 } 1137 1138 /* No process locking is needed in this function, because the process 1139 * is not findable any more. We must assume that no other thread is 1140 * using it any more, otherwise we couldn't safely free the process 1141 * structure in the end. 1142 */ 1143 static void kfd_process_wq_release(struct work_struct *work) 1144 { 1145 struct kfd_process *p = container_of(work, struct kfd_process, 1146 release_work); 1147 struct dma_fence *ef; 1148 1149 kfd_process_dequeue_from_all_devices(p); 1150 pqm_uninit(&p->pqm); 1151 1152 /* Signal the eviction fence after user mode queues are 1153 * destroyed. This allows any BOs to be freed without 1154 * triggering pointless evictions or waiting for fences. 1155 */ 1156 synchronize_rcu(); 1157 ef = rcu_access_pointer(p->ef); 1158 dma_fence_signal(ef); 1159 1160 kfd_process_remove_sysfs(p); 1161 1162 kfd_process_kunmap_signal_bo(p); 1163 kfd_process_free_outstanding_kfd_bos(p); 1164 svm_range_list_fini(p); 1165 1166 kfd_process_destroy_pdds(p); 1167 dma_fence_put(ef); 1168 1169 kfd_event_free_process(p); 1170 1171 kfd_pasid_free(p->pasid); 1172 mutex_destroy(&p->mutex); 1173 1174 put_task_struct(p->lead_thread); 1175 1176 kfree(p); 1177 } 1178 1179 static void kfd_process_ref_release(struct kref *ref) 1180 { 1181 struct kfd_process *p = container_of(ref, struct kfd_process, ref); 1182 1183 INIT_WORK(&p->release_work, kfd_process_wq_release); 1184 queue_work(kfd_process_wq, &p->release_work); 1185 } 1186 1187 static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm) 1188 { 1189 int idx = srcu_read_lock(&kfd_processes_srcu); 1190 struct kfd_process *p = find_process_by_mm(mm); 1191 1192 srcu_read_unlock(&kfd_processes_srcu, idx); 1193 1194 return p ? &p->mmu_notifier : ERR_PTR(-ESRCH); 1195 } 1196 1197 static void kfd_process_free_notifier(struct mmu_notifier *mn) 1198 { 1199 kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier)); 1200 } 1201 1202 static void kfd_process_notifier_release_internal(struct kfd_process *p) 1203 { 1204 int i; 1205 1206 cancel_delayed_work_sync(&p->eviction_work); 1207 cancel_delayed_work_sync(&p->restore_work); 1208 1209 for (i = 0; i < p->n_pdds; i++) { 1210 struct kfd_process_device *pdd = p->pdds[i]; 1211 1212 /* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */ 1213 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup) 1214 amdgpu_gfx_off_ctrl(pdd->dev->adev, true); 1215 } 1216 1217 /* Indicate to other users that MM is no longer valid */ 1218 p->mm = NULL; 1219 kfd_dbg_trap_disable(p); 1220 1221 if (atomic_read(&p->debugged_process_count) > 0) { 1222 struct kfd_process *target; 1223 unsigned int temp; 1224 int idx = srcu_read_lock(&kfd_processes_srcu); 1225 1226 hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) { 1227 if (target->debugger_process && target->debugger_process == p) { 1228 mutex_lock_nested(&target->mutex, 1); 1229 kfd_dbg_trap_disable(target); 1230 mutex_unlock(&target->mutex); 1231 if (atomic_read(&p->debugged_process_count) == 0) 1232 break; 1233 } 1234 } 1235 1236 srcu_read_unlock(&kfd_processes_srcu, idx); 1237 } 1238 1239 mmu_notifier_put(&p->mmu_notifier); 1240 } 1241 1242 static void kfd_process_notifier_release(struct mmu_notifier *mn, 1243 struct mm_struct *mm) 1244 { 1245 struct kfd_process *p; 1246 1247 /* 1248 * The kfd_process structure can not be free because the 1249 * mmu_notifier srcu is read locked 1250 */ 1251 p = container_of(mn, struct kfd_process, mmu_notifier); 1252 if (WARN_ON(p->mm != mm)) 1253 return; 1254 1255 mutex_lock(&kfd_processes_mutex); 1256 /* 1257 * Do early return if table is empty. 1258 * 1259 * This could potentially happen if this function is called concurrently 1260 * by mmu_notifier and by kfd_cleanup_pocesses. 1261 * 1262 */ 1263 if (hash_empty(kfd_processes_table)) { 1264 mutex_unlock(&kfd_processes_mutex); 1265 return; 1266 } 1267 hash_del_rcu(&p->kfd_processes); 1268 mutex_unlock(&kfd_processes_mutex); 1269 synchronize_srcu(&kfd_processes_srcu); 1270 1271 kfd_process_notifier_release_internal(p); 1272 } 1273 1274 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = { 1275 .release = kfd_process_notifier_release, 1276 .alloc_notifier = kfd_process_alloc_notifier, 1277 .free_notifier = kfd_process_free_notifier, 1278 }; 1279 1280 /* 1281 * This code handles the case when driver is being unloaded before all 1282 * mm_struct are released. We need to safely free the kfd_process and 1283 * avoid race conditions with mmu_notifier that might try to free them. 1284 * 1285 */ 1286 void kfd_cleanup_processes(void) 1287 { 1288 struct kfd_process *p; 1289 struct hlist_node *p_temp; 1290 unsigned int temp; 1291 HLIST_HEAD(cleanup_list); 1292 1293 /* 1294 * Move all remaining kfd_process from the process table to a 1295 * temp list for processing. Once done, callback from mmu_notifier 1296 * release will not see the kfd_process in the table and do early return, 1297 * avoiding double free issues. 1298 */ 1299 mutex_lock(&kfd_processes_mutex); 1300 hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) { 1301 hash_del_rcu(&p->kfd_processes); 1302 synchronize_srcu(&kfd_processes_srcu); 1303 hlist_add_head(&p->kfd_processes, &cleanup_list); 1304 } 1305 mutex_unlock(&kfd_processes_mutex); 1306 1307 hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes) 1308 kfd_process_notifier_release_internal(p); 1309 1310 /* 1311 * Ensures that all outstanding free_notifier get called, triggering 1312 * the release of the kfd_process struct. 1313 */ 1314 mmu_notifier_synchronize(); 1315 } 1316 1317 int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) 1318 { 1319 unsigned long offset; 1320 int i; 1321 1322 if (p->has_cwsr) 1323 return 0; 1324 1325 for (i = 0; i < p->n_pdds; i++) { 1326 struct kfd_node *dev = p->pdds[i]->dev; 1327 struct qcm_process_device *qpd = &p->pdds[i]->qpd; 1328 1329 if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) 1330 continue; 1331 1332 offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id); 1333 qpd->tba_addr = (int64_t)vm_mmap(filep, 0, 1334 KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC, 1335 MAP_SHARED, offset); 1336 1337 if (IS_ERR_VALUE(qpd->tba_addr)) { 1338 int err = qpd->tba_addr; 1339 1340 dev_err(dev->adev->dev, 1341 "Failure to set tba address. error %d.\n", err); 1342 qpd->tba_addr = 0; 1343 qpd->cwsr_kaddr = NULL; 1344 return err; 1345 } 1346 1347 memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size); 1348 1349 kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled); 1350 1351 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; 1352 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", 1353 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); 1354 } 1355 1356 p->has_cwsr = true; 1357 1358 return 0; 1359 } 1360 1361 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) 1362 { 1363 struct kfd_node *dev = pdd->dev; 1364 struct qcm_process_device *qpd = &pdd->qpd; 1365 uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT 1366 | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE 1367 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; 1368 struct kgd_mem *mem; 1369 void *kaddr; 1370 int ret; 1371 1372 if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) 1373 return 0; 1374 1375 /* cwsr_base is only set for dGPU */ 1376 ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base, 1377 KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr); 1378 if (ret) 1379 return ret; 1380 1381 qpd->cwsr_mem = mem; 1382 qpd->cwsr_kaddr = kaddr; 1383 qpd->tba_addr = qpd->cwsr_base; 1384 1385 memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size); 1386 1387 kfd_process_set_trap_debug_flag(&pdd->qpd, 1388 pdd->process->debug_trap_enabled); 1389 1390 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; 1391 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", 1392 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); 1393 1394 return 0; 1395 } 1396 1397 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd) 1398 { 1399 struct kfd_node *dev = pdd->dev; 1400 struct qcm_process_device *qpd = &pdd->qpd; 1401 1402 if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) 1403 return; 1404 1405 kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr); 1406 } 1407 1408 void kfd_process_set_trap_handler(struct qcm_process_device *qpd, 1409 uint64_t tba_addr, 1410 uint64_t tma_addr) 1411 { 1412 if (qpd->cwsr_kaddr) { 1413 /* KFD trap handler is bound, record as second-level TBA/TMA 1414 * in first-level TMA. First-level trap will jump to second. 1415 */ 1416 uint64_t *tma = 1417 (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET); 1418 tma[0] = tba_addr; 1419 tma[1] = tma_addr; 1420 } else { 1421 /* No trap handler bound, bind as first-level TBA/TMA. */ 1422 qpd->tba_addr = tba_addr; 1423 qpd->tma_addr = tma_addr; 1424 } 1425 } 1426 1427 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) 1428 { 1429 int i; 1430 1431 /* On most GFXv9 GPUs, the retry mode in the SQ must match the 1432 * boot time retry setting. Mixing processes with different 1433 * XNACK/retry settings can hang the GPU. 1434 * 1435 * Different GPUs can have different noretry settings depending 1436 * on HW bugs or limitations. We need to find at least one 1437 * XNACK mode for this process that's compatible with all GPUs. 1438 * Fortunately GPUs with retry enabled (noretry=0) can run code 1439 * built for XNACK-off. On GFXv9 it may perform slower. 1440 * 1441 * Therefore applications built for XNACK-off can always be 1442 * supported and will be our fallback if any GPU does not 1443 * support retry. 1444 */ 1445 for (i = 0; i < p->n_pdds; i++) { 1446 struct kfd_node *dev = p->pdds[i]->dev; 1447 1448 /* Only consider GFXv9 and higher GPUs. Older GPUs don't 1449 * support the SVM APIs and don't need to be considered 1450 * for the XNACK mode selection. 1451 */ 1452 if (!KFD_IS_SOC15(dev)) 1453 continue; 1454 /* Aldebaran can always support XNACK because it can support 1455 * per-process XNACK mode selection. But let the dev->noretry 1456 * setting still influence the default XNACK mode. 1457 */ 1458 if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) { 1459 if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) { 1460 pr_debug("SRIOV platform xnack not supported\n"); 1461 return false; 1462 } 1463 continue; 1464 } 1465 1466 /* GFXv10 and later GPUs do not support shader preemption 1467 * during page faults. This can lead to poor QoS for queue 1468 * management and memory-manager-related preemptions or 1469 * even deadlocks. 1470 */ 1471 if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) 1472 return false; 1473 1474 if (dev->kfd->noretry) 1475 return false; 1476 } 1477 1478 return true; 1479 } 1480 1481 void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd, 1482 bool enabled) 1483 { 1484 if (qpd->cwsr_kaddr) { 1485 uint64_t *tma = 1486 (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET); 1487 tma[2] = enabled; 1488 } 1489 } 1490 1491 /* 1492 * On return the kfd_process is fully operational and will be freed when the 1493 * mm is released 1494 */ 1495 static struct kfd_process *create_process(const struct task_struct *thread) 1496 { 1497 struct kfd_process *process; 1498 struct mmu_notifier *mn; 1499 int err = -ENOMEM; 1500 1501 process = kzalloc(sizeof(*process), GFP_KERNEL); 1502 if (!process) 1503 goto err_alloc_process; 1504 1505 kref_init(&process->ref); 1506 mutex_init(&process->mutex); 1507 process->mm = thread->mm; 1508 process->lead_thread = thread->group_leader; 1509 process->n_pdds = 0; 1510 process->queues_paused = false; 1511 INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker); 1512 INIT_DELAYED_WORK(&process->restore_work, restore_process_worker); 1513 process->last_restore_timestamp = get_jiffies_64(); 1514 err = kfd_event_init_process(process); 1515 if (err) 1516 goto err_event_init; 1517 process->is_32bit_user_mode = in_compat_syscall(); 1518 process->debug_trap_enabled = false; 1519 process->debugger_process = NULL; 1520 process->exception_enable_mask = 0; 1521 atomic_set(&process->debugged_process_count, 0); 1522 sema_init(&process->runtime_enable_sema, 0); 1523 1524 process->pasid = kfd_pasid_alloc(); 1525 if (process->pasid == 0) { 1526 err = -ENOSPC; 1527 goto err_alloc_pasid; 1528 } 1529 1530 err = pqm_init(&process->pqm, process); 1531 if (err != 0) 1532 goto err_process_pqm_init; 1533 1534 /* init process apertures*/ 1535 err = kfd_init_apertures(process); 1536 if (err != 0) 1537 goto err_init_apertures; 1538 1539 /* Check XNACK support after PDDs are created in kfd_init_apertures */ 1540 process->xnack_enabled = kfd_process_xnack_mode(process, false); 1541 1542 err = svm_range_list_init(process); 1543 if (err) 1544 goto err_init_svm_range_list; 1545 1546 /* alloc_notifier needs to find the process in the hash table */ 1547 hash_add_rcu(kfd_processes_table, &process->kfd_processes, 1548 (uintptr_t)process->mm); 1549 1550 /* Avoid free_notifier to start kfd_process_wq_release if 1551 * mmu_notifier_get failed because of pending signal. 1552 */ 1553 kref_get(&process->ref); 1554 1555 /* MMU notifier registration must be the last call that can fail 1556 * because after this point we cannot unwind the process creation. 1557 * After this point, mmu_notifier_put will trigger the cleanup by 1558 * dropping the last process reference in the free_notifier. 1559 */ 1560 mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm); 1561 if (IS_ERR(mn)) { 1562 err = PTR_ERR(mn); 1563 goto err_register_notifier; 1564 } 1565 BUG_ON(mn != &process->mmu_notifier); 1566 1567 kfd_unref_process(process); 1568 get_task_struct(process->lead_thread); 1569 1570 INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler); 1571 1572 return process; 1573 1574 err_register_notifier: 1575 hash_del_rcu(&process->kfd_processes); 1576 svm_range_list_fini(process); 1577 err_init_svm_range_list: 1578 kfd_process_free_outstanding_kfd_bos(process); 1579 kfd_process_destroy_pdds(process); 1580 err_init_apertures: 1581 pqm_uninit(&process->pqm); 1582 err_process_pqm_init: 1583 kfd_pasid_free(process->pasid); 1584 err_alloc_pasid: 1585 kfd_event_free_process(process); 1586 err_event_init: 1587 mutex_destroy(&process->mutex); 1588 kfree(process); 1589 err_alloc_process: 1590 return ERR_PTR(err); 1591 } 1592 1593 struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev, 1594 struct kfd_process *p) 1595 { 1596 int i; 1597 1598 for (i = 0; i < p->n_pdds; i++) 1599 if (p->pdds[i]->dev == dev) 1600 return p->pdds[i]; 1601 1602 return NULL; 1603 } 1604 1605 struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, 1606 struct kfd_process *p) 1607 { 1608 struct kfd_process_device *pdd = NULL; 1609 int retval = 0; 1610 1611 if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE)) 1612 return NULL; 1613 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL); 1614 if (!pdd) 1615 return NULL; 1616 1617 pdd->dev = dev; 1618 INIT_LIST_HEAD(&pdd->qpd.queues_list); 1619 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); 1620 pdd->qpd.dqm = dev->dqm; 1621 pdd->qpd.pqm = &p->pqm; 1622 pdd->qpd.evicted = 0; 1623 pdd->qpd.mapped_gws_queue = false; 1624 pdd->process = p; 1625 pdd->bound = PDD_UNBOUND; 1626 pdd->already_dequeued = false; 1627 pdd->runtime_inuse = false; 1628 pdd->vram_usage = 0; 1629 pdd->sdma_past_activity_counter = 0; 1630 pdd->user_gpu_id = dev->id; 1631 atomic64_set(&pdd->evict_duration_counter, 0); 1632 1633 if (dev->kfd->shared_resources.enable_mes) { 1634 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, 1635 AMDGPU_MES_PROC_CTX_SIZE, 1636 &pdd->proc_ctx_bo, 1637 &pdd->proc_ctx_gpu_addr, 1638 &pdd->proc_ctx_cpu_ptr, 1639 false); 1640 if (retval) { 1641 dev_err(dev->adev->dev, 1642 "failed to allocate process context bo\n"); 1643 goto err_free_pdd; 1644 } 1645 memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); 1646 } 1647 1648 p->pdds[p->n_pdds++] = pdd; 1649 if (kfd_dbg_is_per_vmid_supported(pdd->dev)) 1650 pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap( 1651 pdd->dev->adev, 1652 false, 1653 0); 1654 1655 /* Init idr used for memory handle translation */ 1656 idr_init(&pdd->alloc_idr); 1657 1658 return pdd; 1659 1660 err_free_pdd: 1661 kfree(pdd); 1662 return NULL; 1663 } 1664 1665 /** 1666 * kfd_process_device_init_vm - Initialize a VM for a process-device 1667 * 1668 * @pdd: The process-device 1669 * @drm_file: Optional pointer to a DRM file descriptor 1670 * 1671 * If @drm_file is specified, it will be used to acquire the VM from 1672 * that file descriptor. If successful, the @pdd takes ownership of 1673 * the file descriptor. 1674 * 1675 * If @drm_file is NULL, a new VM is created. 1676 * 1677 * Returns 0 on success, -errno on failure. 1678 */ 1679 int kfd_process_device_init_vm(struct kfd_process_device *pdd, 1680 struct file *drm_file) 1681 { 1682 struct amdgpu_fpriv *drv_priv; 1683 struct amdgpu_vm *avm; 1684 struct kfd_process *p; 1685 struct dma_fence *ef; 1686 struct kfd_node *dev; 1687 int ret; 1688 1689 if (!drm_file) 1690 return -EINVAL; 1691 1692 if (pdd->drm_priv) 1693 return -EBUSY; 1694 1695 ret = amdgpu_file_to_fpriv(drm_file, &drv_priv); 1696 if (ret) 1697 return ret; 1698 avm = &drv_priv->vm; 1699 1700 p = pdd->process; 1701 dev = pdd->dev; 1702 1703 ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm, 1704 &p->kgd_process_info, 1705 &ef); 1706 if (ret) { 1707 dev_err(dev->adev->dev, "Failed to create process VM object\n"); 1708 return ret; 1709 } 1710 RCU_INIT_POINTER(p->ef, ef); 1711 pdd->drm_priv = drm_file->private_data; 1712 1713 ret = kfd_process_device_reserve_ib_mem(pdd); 1714 if (ret) 1715 goto err_reserve_ib_mem; 1716 ret = kfd_process_device_init_cwsr_dgpu(pdd); 1717 if (ret) 1718 goto err_init_cwsr; 1719 1720 ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid); 1721 if (ret) 1722 goto err_set_pasid; 1723 1724 pdd->drm_file = drm_file; 1725 1726 return 0; 1727 1728 err_set_pasid: 1729 kfd_process_device_destroy_cwsr_dgpu(pdd); 1730 err_init_cwsr: 1731 kfd_process_device_destroy_ib_mem(pdd); 1732 err_reserve_ib_mem: 1733 pdd->drm_priv = NULL; 1734 amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm); 1735 1736 return ret; 1737 } 1738 1739 /* 1740 * Direct the IOMMU to bind the process (specifically the pasid->mm) 1741 * to the device. 1742 * Unbinding occurs when the process dies or the device is removed. 1743 * 1744 * Assumes that the process lock is held. 1745 */ 1746 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev, 1747 struct kfd_process *p) 1748 { 1749 struct kfd_process_device *pdd; 1750 int err; 1751 1752 pdd = kfd_get_process_device_data(dev, p); 1753 if (!pdd) { 1754 dev_err(dev->adev->dev, "Process device data doesn't exist\n"); 1755 return ERR_PTR(-ENOMEM); 1756 } 1757 1758 if (!pdd->drm_priv) 1759 return ERR_PTR(-ENODEV); 1760 1761 /* 1762 * signal runtime-pm system to auto resume and prevent 1763 * further runtime suspend once device pdd is created until 1764 * pdd is destroyed. 1765 */ 1766 if (!pdd->runtime_inuse) { 1767 err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev); 1768 if (err < 0) { 1769 pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev); 1770 return ERR_PTR(err); 1771 } 1772 } 1773 1774 /* 1775 * make sure that runtime_usage counter is incremented just once 1776 * per pdd 1777 */ 1778 pdd->runtime_inuse = true; 1779 1780 return pdd; 1781 } 1782 1783 /* Create specific handle mapped to mem from process local memory idr 1784 * Assumes that the process lock is held. 1785 */ 1786 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd, 1787 void *mem) 1788 { 1789 return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL); 1790 } 1791 1792 /* Translate specific handle from process local memory idr 1793 * Assumes that the process lock is held. 1794 */ 1795 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd, 1796 int handle) 1797 { 1798 if (handle < 0) 1799 return NULL; 1800 1801 return idr_find(&pdd->alloc_idr, handle); 1802 } 1803 1804 /* Remove specific handle from process local memory idr 1805 * Assumes that the process lock is held. 1806 */ 1807 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd, 1808 int handle) 1809 { 1810 if (handle >= 0) 1811 idr_remove(&pdd->alloc_idr, handle); 1812 } 1813 1814 /* This increments the process->ref counter. */ 1815 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid) 1816 { 1817 struct kfd_process *p, *ret_p = NULL; 1818 unsigned int temp; 1819 1820 int idx = srcu_read_lock(&kfd_processes_srcu); 1821 1822 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 1823 if (p->pasid == pasid) { 1824 kref_get(&p->ref); 1825 ret_p = p; 1826 break; 1827 } 1828 } 1829 1830 srcu_read_unlock(&kfd_processes_srcu, idx); 1831 1832 return ret_p; 1833 } 1834 1835 /* This increments the process->ref counter. */ 1836 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm) 1837 { 1838 struct kfd_process *p; 1839 1840 int idx = srcu_read_lock(&kfd_processes_srcu); 1841 1842 p = find_process_by_mm(mm); 1843 if (p) 1844 kref_get(&p->ref); 1845 1846 srcu_read_unlock(&kfd_processes_srcu, idx); 1847 1848 return p; 1849 } 1850 1851 /* kfd_process_evict_queues - Evict all user queues of a process 1852 * 1853 * Eviction is reference-counted per process-device. This means multiple 1854 * evictions from different sources can be nested safely. 1855 */ 1856 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger) 1857 { 1858 int r = 0; 1859 int i; 1860 unsigned int n_evicted = 0; 1861 1862 for (i = 0; i < p->n_pdds; i++) { 1863 struct kfd_process_device *pdd = p->pdds[i]; 1864 struct device *dev = pdd->dev->adev->dev; 1865 1866 kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid, 1867 trigger); 1868 1869 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, 1870 &pdd->qpd); 1871 /* evict return -EIO if HWS is hang or asic is resetting, in this case 1872 * we would like to set all the queues to be in evicted state to prevent 1873 * them been add back since they actually not be saved right now. 1874 */ 1875 if (r && r != -EIO) { 1876 dev_err(dev, "Failed to evict process queues\n"); 1877 goto fail; 1878 } 1879 n_evicted++; 1880 1881 pdd->dev->dqm->is_hws_hang = false; 1882 } 1883 1884 return r; 1885 1886 fail: 1887 /* To keep state consistent, roll back partial eviction by 1888 * restoring queues 1889 */ 1890 for (i = 0; i < p->n_pdds; i++) { 1891 struct kfd_process_device *pdd = p->pdds[i]; 1892 1893 if (n_evicted == 0) 1894 break; 1895 1896 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); 1897 1898 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, 1899 &pdd->qpd)) 1900 dev_err(pdd->dev->adev->dev, 1901 "Failed to restore queues\n"); 1902 1903 n_evicted--; 1904 } 1905 1906 return r; 1907 } 1908 1909 /* kfd_process_restore_queues - Restore all user queues of a process */ 1910 int kfd_process_restore_queues(struct kfd_process *p) 1911 { 1912 int r, ret = 0; 1913 int i; 1914 1915 for (i = 0; i < p->n_pdds; i++) { 1916 struct kfd_process_device *pdd = p->pdds[i]; 1917 struct device *dev = pdd->dev->adev->dev; 1918 1919 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); 1920 1921 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, 1922 &pdd->qpd); 1923 if (r) { 1924 dev_err(dev, "Failed to restore process queues\n"); 1925 if (!ret) 1926 ret = r; 1927 } 1928 } 1929 1930 return ret; 1931 } 1932 1933 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id) 1934 { 1935 int i; 1936 1937 for (i = 0; i < p->n_pdds; i++) 1938 if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id) 1939 return i; 1940 return -EINVAL; 1941 } 1942 1943 int 1944 kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, 1945 uint32_t *gpuid, uint32_t *gpuidx) 1946 { 1947 int i; 1948 1949 for (i = 0; i < p->n_pdds; i++) 1950 if (p->pdds[i] && p->pdds[i]->dev == node) { 1951 *gpuid = p->pdds[i]->user_gpu_id; 1952 *gpuidx = i; 1953 return 0; 1954 } 1955 return -EINVAL; 1956 } 1957 1958 static int signal_eviction_fence(struct kfd_process *p) 1959 { 1960 struct dma_fence *ef; 1961 int ret; 1962 1963 rcu_read_lock(); 1964 ef = dma_fence_get_rcu_safe(&p->ef); 1965 rcu_read_unlock(); 1966 if (!ef) 1967 return -EINVAL; 1968 1969 ret = dma_fence_signal(ef); 1970 dma_fence_put(ef); 1971 1972 return ret; 1973 } 1974 1975 static void evict_process_worker(struct work_struct *work) 1976 { 1977 int ret; 1978 struct kfd_process *p; 1979 struct delayed_work *dwork; 1980 1981 dwork = to_delayed_work(work); 1982 1983 /* Process termination destroys this worker thread. So during the 1984 * lifetime of this thread, kfd_process p will be valid 1985 */ 1986 p = container_of(dwork, struct kfd_process, eviction_work); 1987 1988 pr_debug("Started evicting pasid 0x%x\n", p->pasid); 1989 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM); 1990 if (!ret) { 1991 /* If another thread already signaled the eviction fence, 1992 * they are responsible stopping the queues and scheduling 1993 * the restore work. 1994 */ 1995 if (signal_eviction_fence(p) || 1996 mod_delayed_work(kfd_restore_wq, &p->restore_work, 1997 msecs_to_jiffies(PROCESS_RESTORE_TIME_MS))) 1998 kfd_process_restore_queues(p); 1999 2000 pr_debug("Finished evicting pasid 0x%x\n", p->pasid); 2001 } else 2002 pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid); 2003 } 2004 2005 static int restore_process_helper(struct kfd_process *p) 2006 { 2007 int ret = 0; 2008 2009 /* VMs may not have been acquired yet during debugging. */ 2010 if (p->kgd_process_info) { 2011 ret = amdgpu_amdkfd_gpuvm_restore_process_bos( 2012 p->kgd_process_info, &p->ef); 2013 if (ret) 2014 return ret; 2015 } 2016 2017 ret = kfd_process_restore_queues(p); 2018 if (!ret) 2019 pr_debug("Finished restoring pasid 0x%x\n", p->pasid); 2020 else 2021 pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid); 2022 2023 return ret; 2024 } 2025 2026 static void restore_process_worker(struct work_struct *work) 2027 { 2028 struct delayed_work *dwork; 2029 struct kfd_process *p; 2030 int ret = 0; 2031 2032 dwork = to_delayed_work(work); 2033 2034 /* Process termination destroys this worker thread. So during the 2035 * lifetime of this thread, kfd_process p will be valid 2036 */ 2037 p = container_of(dwork, struct kfd_process, restore_work); 2038 pr_debug("Started restoring pasid 0x%x\n", p->pasid); 2039 2040 /* Setting last_restore_timestamp before successful restoration. 2041 * Otherwise this would have to be set by KGD (restore_process_bos) 2042 * before KFD BOs are unreserved. If not, the process can be evicted 2043 * again before the timestamp is set. 2044 * If restore fails, the timestamp will be set again in the next 2045 * attempt. This would mean that the minimum GPU quanta would be 2046 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two 2047 * functions) 2048 */ 2049 2050 p->last_restore_timestamp = get_jiffies_64(); 2051 2052 ret = restore_process_helper(p); 2053 if (ret) { 2054 pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n", 2055 p->pasid, PROCESS_BACK_OFF_TIME_MS); 2056 if (mod_delayed_work(kfd_restore_wq, &p->restore_work, 2057 msecs_to_jiffies(PROCESS_RESTORE_TIME_MS))) 2058 kfd_process_restore_queues(p); 2059 } 2060 } 2061 2062 void kfd_suspend_all_processes(void) 2063 { 2064 struct kfd_process *p; 2065 unsigned int temp; 2066 int idx = srcu_read_lock(&kfd_processes_srcu); 2067 2068 WARN(debug_evictions, "Evicting all processes"); 2069 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 2070 if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND)) 2071 pr_err("Failed to suspend process 0x%x\n", p->pasid); 2072 signal_eviction_fence(p); 2073 } 2074 srcu_read_unlock(&kfd_processes_srcu, idx); 2075 } 2076 2077 int kfd_resume_all_processes(void) 2078 { 2079 struct kfd_process *p; 2080 unsigned int temp; 2081 int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu); 2082 2083 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 2084 if (restore_process_helper(p)) { 2085 pr_err("Restore process %d failed during resume\n", 2086 p->pasid); 2087 ret = -EFAULT; 2088 } 2089 } 2090 srcu_read_unlock(&kfd_processes_srcu, idx); 2091 return ret; 2092 } 2093 2094 int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process, 2095 struct vm_area_struct *vma) 2096 { 2097 struct kfd_process_device *pdd; 2098 struct qcm_process_device *qpd; 2099 2100 if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) { 2101 dev_err(dev->adev->dev, "Incorrect CWSR mapping size.\n"); 2102 return -EINVAL; 2103 } 2104 2105 pdd = kfd_get_process_device_data(dev, process); 2106 if (!pdd) 2107 return -EINVAL; 2108 qpd = &pdd->qpd; 2109 2110 qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2111 get_order(KFD_CWSR_TBA_TMA_SIZE)); 2112 if (!qpd->cwsr_kaddr) { 2113 dev_err(dev->adev->dev, 2114 "Error allocating per process CWSR buffer.\n"); 2115 return -ENOMEM; 2116 } 2117 2118 vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND 2119 | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP); 2120 /* Mapping pages to user process */ 2121 return remap_pfn_range(vma, vma->vm_start, 2122 PFN_DOWN(__pa(qpd->cwsr_kaddr)), 2123 KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot); 2124 } 2125 2126 /* assumes caller holds process lock. */ 2127 int kfd_process_drain_interrupts(struct kfd_process_device *pdd) 2128 { 2129 uint32_t irq_drain_fence[8]; 2130 uint8_t node_id = 0; 2131 int r = 0; 2132 2133 if (!KFD_IS_SOC15(pdd->dev)) 2134 return 0; 2135 2136 pdd->process->irq_drain_is_open = true; 2137 2138 memset(irq_drain_fence, 0, sizeof(irq_drain_fence)); 2139 irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) | 2140 KFD_IRQ_FENCE_CLIENTID; 2141 irq_drain_fence[3] = pdd->process->pasid; 2142 2143 /* 2144 * For GFX 9.4.3, send the NodeId also in IH cookie DW[3] 2145 */ 2146 if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) || 2147 KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4)) { 2148 node_id = ffs(pdd->dev->interrupt_bitmap) - 1; 2149 irq_drain_fence[3] |= node_id << 16; 2150 } 2151 2152 /* ensure stale irqs scheduled KFD interrupts and send drain fence. */ 2153 if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev, 2154 irq_drain_fence)) { 2155 pdd->process->irq_drain_is_open = false; 2156 return 0; 2157 } 2158 2159 r = wait_event_interruptible(pdd->process->wait_irq_drain, 2160 !READ_ONCE(pdd->process->irq_drain_is_open)); 2161 if (r) 2162 pdd->process->irq_drain_is_open = false; 2163 2164 return r; 2165 } 2166 2167 void kfd_process_close_interrupt_drain(unsigned int pasid) 2168 { 2169 struct kfd_process *p; 2170 2171 p = kfd_lookup_process_by_pasid(pasid); 2172 2173 if (!p) 2174 return; 2175 2176 WRITE_ONCE(p->irq_drain_is_open, false); 2177 wake_up_all(&p->wait_irq_drain); 2178 kfd_unref_process(p); 2179 } 2180 2181 struct send_exception_work_handler_workarea { 2182 struct work_struct work; 2183 struct kfd_process *p; 2184 unsigned int queue_id; 2185 uint64_t error_reason; 2186 }; 2187 2188 static void send_exception_work_handler(struct work_struct *work) 2189 { 2190 struct send_exception_work_handler_workarea *workarea; 2191 struct kfd_process *p; 2192 struct queue *q; 2193 struct mm_struct *mm; 2194 struct kfd_context_save_area_header __user *csa_header; 2195 uint64_t __user *err_payload_ptr; 2196 uint64_t cur_err; 2197 uint32_t ev_id; 2198 2199 workarea = container_of(work, 2200 struct send_exception_work_handler_workarea, 2201 work); 2202 p = workarea->p; 2203 2204 mm = get_task_mm(p->lead_thread); 2205 2206 if (!mm) 2207 return; 2208 2209 kthread_use_mm(mm); 2210 2211 q = pqm_get_user_queue(&p->pqm, workarea->queue_id); 2212 2213 if (!q) 2214 goto out; 2215 2216 csa_header = (void __user *)q->properties.ctx_save_restore_area_address; 2217 2218 get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr); 2219 get_user(cur_err, err_payload_ptr); 2220 cur_err |= workarea->error_reason; 2221 put_user(cur_err, err_payload_ptr); 2222 get_user(ev_id, &csa_header->err_event_id); 2223 2224 kfd_set_event(p, ev_id); 2225 2226 out: 2227 kthread_unuse_mm(mm); 2228 mmput(mm); 2229 } 2230 2231 int kfd_send_exception_to_runtime(struct kfd_process *p, 2232 unsigned int queue_id, 2233 uint64_t error_reason) 2234 { 2235 struct send_exception_work_handler_workarea worker; 2236 2237 INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler); 2238 2239 worker.p = p; 2240 worker.queue_id = queue_id; 2241 worker.error_reason = error_reason; 2242 2243 schedule_work(&worker.work); 2244 flush_work(&worker.work); 2245 destroy_work_on_stack(&worker.work); 2246 2247 return 0; 2248 } 2249 2250 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id) 2251 { 2252 int i; 2253 2254 if (gpu_id) { 2255 for (i = 0; i < p->n_pdds; i++) { 2256 struct kfd_process_device *pdd = p->pdds[i]; 2257 2258 if (pdd->user_gpu_id == gpu_id) 2259 return pdd; 2260 } 2261 } 2262 return NULL; 2263 } 2264 2265 int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id) 2266 { 2267 int i; 2268 2269 if (!actual_gpu_id) 2270 return 0; 2271 2272 for (i = 0; i < p->n_pdds; i++) { 2273 struct kfd_process_device *pdd = p->pdds[i]; 2274 2275 if (pdd->dev->id == actual_gpu_id) 2276 return pdd->user_gpu_id; 2277 } 2278 return -EINVAL; 2279 } 2280 2281 #if defined(CONFIG_DEBUG_FS) 2282 2283 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data) 2284 { 2285 struct kfd_process *p; 2286 unsigned int temp; 2287 int r = 0; 2288 2289 int idx = srcu_read_lock(&kfd_processes_srcu); 2290 2291 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { 2292 seq_printf(m, "Process %d PASID 0x%x:\n", 2293 p->lead_thread->tgid, p->pasid); 2294 2295 mutex_lock(&p->mutex); 2296 r = pqm_debugfs_mqds(m, &p->pqm); 2297 mutex_unlock(&p->mutex); 2298 2299 if (r) 2300 break; 2301 } 2302 2303 srcu_read_unlock(&kfd_processes_srcu, idx); 2304 2305 return r; 2306 } 2307 2308 #endif 2309