1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/mutex.h>
25 #include <linux/log2.h>
26 #include <linux/sched.h>
27 #include <linux/sched/mm.h>
28 #include <linux/sched/task.h>
29 #include <linux/mmu_context.h>
30 #include <linux/slab.h>
31 #include <linux/notifier.h>
32 #include <linux/compat.h>
33 #include <linux/mman.h>
34 #include <linux/file.h>
35 #include <linux/pm_runtime.h>
36 #include "amdgpu_amdkfd.h"
37 #include "amdgpu.h"
38 #include "amdgpu_reset.h"
39 
40 struct mm_struct;
41 
42 #include "kfd_priv.h"
43 #include "kfd_device_queue_manager.h"
44 #include "kfd_svm.h"
45 #include "kfd_smi_events.h"
46 #include "kfd_debug.h"
47 
48 /*
49  * List of struct kfd_process (field kfd_processes).
50  * Unique/indexed by mm_struct*
51  */
52 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
53 DEFINE_MUTEX(kfd_processes_mutex);
54 
55 DEFINE_SRCU(kfd_processes_srcu);
56 
57 /* For process termination handling */
58 static struct workqueue_struct *kfd_process_wq;
59 
60 /* Ordered, single-threaded workqueue for restoring evicted
61  * processes. Restoring multiple processes concurrently under memory
62  * pressure can lead to processes blocking each other from validating
63  * their BOs and result in a live-lock situation where processes
64  * remain evicted indefinitely.
65  */
66 static struct workqueue_struct *kfd_restore_wq;
67 
68 static struct kfd_process *find_process(const struct task_struct *thread,
69 					bool ref);
70 static void kfd_process_ref_release(struct kref *ref);
71 
72 static void evict_process_worker(struct work_struct *work);
73 static void restore_process_worker(struct work_struct *work);
74 
75 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
76 
77 struct kfd_procfs_tree {
78 	struct kobject *kobj;
79 };
80 
81 static struct kfd_procfs_tree procfs;
82 
83 /*
84  * Structure for SDMA activity tracking
85  */
86 struct kfd_sdma_activity_handler_workarea {
87 	struct work_struct sdma_activity_work;
88 	struct kfd_process_device *pdd;
89 	uint64_t sdma_activity_counter;
90 };
91 
92 struct temp_sdma_queue_list {
93 	uint64_t __user *rptr;
94 	uint64_t sdma_val;
95 	unsigned int queue_id;
96 	struct list_head list;
97 };
98 
99 static void kfd_sdma_activity_worker(struct work_struct *work)
100 {
101 	struct kfd_sdma_activity_handler_workarea *workarea;
102 	struct kfd_process_device *pdd;
103 	uint64_t val;
104 	struct mm_struct *mm;
105 	struct queue *q;
106 	struct qcm_process_device *qpd;
107 	struct device_queue_manager *dqm;
108 	int ret = 0;
109 	struct temp_sdma_queue_list sdma_q_list;
110 	struct temp_sdma_queue_list *sdma_q, *next;
111 
112 	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
113 				sdma_activity_work);
114 
115 	pdd = workarea->pdd;
116 	if (!pdd)
117 		return;
118 	dqm = pdd->dev->dqm;
119 	qpd = &pdd->qpd;
120 	if (!dqm || !qpd)
121 		return;
122 	/*
123 	 * Total SDMA activity is current SDMA activity + past SDMA activity
124 	 * Past SDMA count is stored in pdd.
125 	 * To get the current activity counters for all active SDMA queues,
126 	 * we loop over all SDMA queues and get their counts from user-space.
127 	 *
128 	 * We cannot call get_user() with dqm_lock held as it can cause
129 	 * a circular lock dependency situation. To read the SDMA stats,
130 	 * we need to do the following:
131 	 *
132 	 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
133 	 *    with dqm_lock/dqm_unlock().
134 	 * 2. Call get_user() for each node in temporary list without dqm_lock.
135 	 *    Save the SDMA count for each node and also add the count to the total
136 	 *    SDMA count counter.
137 	 *    It's possible that, during this step, a few SDMA queue nodes got deleted
138 	 *    from the qpd->queues_list.
139 	 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
140 	 *    If any node got deleted, its SDMA count would be captured in the sdma
141 	 *    past activity counter. So subtract the SDMA counter stored in step 2
142 	 *    for this node from the total SDMA count.
143 	 */
144 	INIT_LIST_HEAD(&sdma_q_list.list);
145 
146 	/*
147 	 * Create the temp list of all SDMA queues
148 	 */
149 	dqm_lock(dqm);
150 
151 	list_for_each_entry(q, &qpd->queues_list, list) {
152 		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
153 		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
154 			continue;
155 
156 		sdma_q = kzalloc_obj(struct temp_sdma_queue_list);
157 		if (!sdma_q) {
158 			dqm_unlock(dqm);
159 			goto cleanup;
160 		}
161 
162 		INIT_LIST_HEAD(&sdma_q->list);
163 		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
164 		sdma_q->queue_id = q->properties.queue_id;
165 		list_add_tail(&sdma_q->list, &sdma_q_list.list);
166 	}
167 
168 	/*
169 	 * If the temp list is empty, then no SDMA queue nodes were found in
170 	 * qpd->queues_list. Return the past activity count as the total SDMA
171 	 * count.
172 	 */
173 	if (list_empty(&sdma_q_list.list)) {
174 		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
175 		dqm_unlock(dqm);
176 		return;
177 	}
178 
179 	dqm_unlock(dqm);
180 
181 	/*
182 	 * Get the usage count for each SDMA queue in temp_list.
183 	 */
184 	mm = get_task_mm(pdd->process->lead_thread);
185 	if (!mm)
186 		goto cleanup;
187 
188 	kthread_use_mm(mm);
189 
190 	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
191 		val = 0;
192 		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
193 		if (ret) {
194 			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
195 				 sdma_q->queue_id);
196 		} else {
197 			sdma_q->sdma_val = val;
198 			workarea->sdma_activity_counter += val;
199 		}
200 	}
201 
202 	kthread_unuse_mm(mm);
203 	mmput(mm);
204 
205 	/*
206 	 * Do a second iteration over qpd->queues_list to check if any SDMA
207 	 * queue nodes were deleted while the SDMA counters were being fetched.
208 	 */
209 	dqm_lock(dqm);
210 
211 	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
212 
213 	list_for_each_entry(q, &qpd->queues_list, list) {
214 		if (list_empty(&sdma_q_list.list))
215 			break;
216 
217 		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
218 		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
219 			continue;
220 
221 		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
222 			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
223 			     (sdma_q->queue_id == q->properties.queue_id)) {
224 				list_del(&sdma_q->list);
225 				kfree(sdma_q);
226 				break;
227 			}
228 		}
229 	}
230 
231 	dqm_unlock(dqm);
232 
233 	/*
234 	 * If temp list is not empty, it implies some queues got deleted
235 	 * from qpd->queues_list during SDMA usage read. Subtract the SDMA
236 	 * count for each node from the total SDMA count.
237 	 */
238 	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
239 		workarea->sdma_activity_counter -= sdma_q->sdma_val;
240 		list_del(&sdma_q->list);
241 		kfree(sdma_q);
242 	}
243 
244 	return;
245 
246 cleanup:
247 	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
248 		list_del(&sdma_q->list);
249 		kfree(sdma_q);
250 	}
251 }
252 
253 /**
254  * kfd_get_cu_occupancy - Collect the number of waves in flight on this device
255  * for the current process and translate the acquired wave count into the
256  * number of compute units that are occupied.
257  *
258  * @attr: Handle of the attribute that allows reporting of the wave count. The
259  * attribute handle encapsulates the GPU device it is associated with, thereby
260  * allowing collection of the waves in flight, etc.
261  * @buffer: Handle of the user-provided buffer updated with the wave count
262  *
263  * Return: Number of bytes written to user buffer or an error value
264  */
265 static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
266 {
267 	int cu_cnt;
268 	int wave_cnt;
269 	int max_waves_per_cu;
270 	struct kfd_node *dev = NULL;
271 	struct kfd_process *proc = NULL;
272 	struct kfd_process_device *pdd = NULL;
273 	int i;
274 	struct kfd_cu_occupancy *cu_occupancy;
275 	u32 queue_format;
276 
277 	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
278 	dev = pdd->dev;
279 	if (dev->kfd2kgd->get_cu_occupancy == NULL)
280 		return -EINVAL;
281 
282 	cu_cnt = 0;
283 	proc = pdd->process;
284 	if (pdd->qpd.queue_count == 0) {
285 		pr_debug("Gpu-Id: %d has no active queues for process pid %d\n",
286 			 dev->id, (int)proc->lead_thread->pid);
287 		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
288 	}
289 
290 	/* Collect the wave count from the device if it supports it */
291 	wave_cnt = 0;
292 	max_waves_per_cu = 0;
293 
294 	cu_occupancy = kzalloc_objs(*cu_occupancy, AMDGPU_MAX_QUEUES);
295 	if (!cu_occupancy)
296 		return -ENOMEM;
297 
298 	/*
299 	 * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition.
300 	 * For AQL queues, because of cooperative dispatch we multiply the wave count
301 	 * by number of XCCs in the partition to get the total wave counts across all
302 	 * XCCs in the partition.
303 	 * For PM4 queues, there is no cooperative dispatch, so wave_cnt stays as is.
304 	 */
305 	dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy,
306 			&max_waves_per_cu, ffs(dev->xcc_mask) - 1);
307 
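	/*
	 * get_cu_occupancy() reports per-queue wave counts for the whole
	 * device; only count entries that belong to a queue of this process.
	 */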
308 	for (i = 0; i < AMDGPU_MAX_QUEUES; i++) {
309 		if (cu_occupancy[i].wave_cnt != 0 &&
310 		    kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd,
311 						cu_occupancy[i].doorbell_off,
312 						&queue_format)) {
313 			if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4))
314 				wave_cnt += cu_occupancy[i].wave_cnt;
315 			else
316 				wave_cnt += (NUM_XCC(dev->xcc_mask) *
317 						cu_occupancy[i].wave_cnt);
318 		}
319 	}
320 
321 	/* Translate wave count to number of compute units */
322 	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
323 	kfree(cu_occupancy);
324 	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
325 }
326 
327 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
328 			       char *buffer)
329 {
330 	if (strcmp(attr->name, "pasid") == 0)
331 		return snprintf(buffer, PAGE_SIZE, "%d\n", 0);
332 	else if (strncmp(attr->name, "vram_", 5) == 0) {
333 		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
334 							      attr_vram);
335 		return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
336 	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
337 		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
338 							      attr_sdma);
339 		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
340 
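		/*
		 * Reading the SDMA counters dereferences user-space read
		 * pointers of the target process, so do it from a worker
		 * where kthread_use_mm() can temporarily adopt that mm.
		 */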
341 		INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
342 				  kfd_sdma_activity_worker);
343 
344 		sdma_activity_work_handler.pdd = pdd;
345 		sdma_activity_work_handler.sdma_activity_counter = 0;
346 
347 		schedule_work(&sdma_activity_work_handler.sdma_activity_work);
348 
349 		flush_work(&sdma_activity_work_handler.sdma_activity_work);
350 		destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
351 
352 		return snprintf(buffer, PAGE_SIZE, "%llu\n",
353 				(sdma_activity_work_handler.sdma_activity_counter)/
354 				 SDMA_ACTIVITY_DIVISOR);
355 	} else {
356 		pr_err("Invalid attribute");
357 		return -EINVAL;
358 	}
359 
360 	return 0;
361 }
362 
363 static void kfd_procfs_kobj_release(struct kobject *kobj)
364 {
365 	kfree(kobj);
366 }
367 
368 static const struct sysfs_ops kfd_procfs_ops = {
369 	.show = kfd_procfs_show,
370 };
371 
372 static const struct kobj_type procfs_type = {
373 	.release = kfd_procfs_kobj_release,
374 	.sysfs_ops = &kfd_procfs_ops,
375 };
376 
377 void kfd_procfs_init(void)
378 {
379 	int ret = 0;
380 
381 	procfs.kobj = kfd_alloc_struct(procfs.kobj);
382 	if (!procfs.kobj)
383 		return;
384 
385 	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
386 				   &kfd_device->kobj, "proc");
387 	if (ret) {
388 		pr_warn("Could not create procfs proc folder");
389 		/* If we fail to create the procfs, clean up */
390 		kfd_procfs_shutdown();
391 	}
392 }
393 
394 void kfd_procfs_shutdown(void)
395 {
396 	if (procfs.kobj) {
397 		kobject_del(procfs.kobj);
398 		kobject_put(procfs.kobj);
399 		procfs.kobj = NULL;
400 	}
401 }
402 
403 static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
404 				     struct attribute *attr, char *buffer)
405 {
406 	struct queue *q = container_of(kobj, struct queue, kobj);
407 
408 	if (!strcmp(attr->name, "size"))
409 		return snprintf(buffer, PAGE_SIZE, "%llu",
410 				q->properties.queue_size);
411 	else if (!strcmp(attr->name, "type"))
412 		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
413 	else if (!strcmp(attr->name, "gpuid"))
414 		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
415 	else
416 		pr_err("Invalid attribute");
417 
418 	return 0;
419 }
420 
421 static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
422 				     struct attribute *attr, char *buffer)
423 {
424 	if (strcmp(attr->name, "evicted_ms") == 0) {
425 		struct kfd_process_device *pdd = container_of(attr,
426 				struct kfd_process_device,
427 				attr_evict);
428 		uint64_t evict_jiffies;
429 
430 		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
431 
432 		return snprintf(buffer,
433 				PAGE_SIZE,
434 				"%llu\n",
435 				jiffies64_to_msecs(evict_jiffies));
436 
437 	/* Sysfs handle that gets CU occupancy is per device */
438 	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
439 		return kfd_get_cu_occupancy(attr, buffer);
440 	} else {
441 		pr_err("Invalid attribute");
442 	}
443 
444 	return 0;
445 }
446 
447 static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
448 				       struct attribute *attr, char *buf)
449 {
450 	struct kfd_process_device *pdd;
451 
452 	if (!strcmp(attr->name, "faults")) {
453 		pdd = container_of(attr, struct kfd_process_device,
454 				   attr_faults);
455 		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
456 	}
457 	if (!strcmp(attr->name, "page_in")) {
458 		pdd = container_of(attr, struct kfd_process_device,
459 				   attr_page_in);
460 		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
461 	}
462 	if (!strcmp(attr->name, "page_out")) {
463 		pdd = container_of(attr, struct kfd_process_device,
464 				   attr_page_out);
465 		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
466 	}
467 	return 0;
468 }
469 
470 static struct attribute attr_queue_size = {
471 	.name = "size",
472 	.mode = KFD_SYSFS_FILE_MODE
473 };
474 
475 static struct attribute attr_queue_type = {
476 	.name = "type",
477 	.mode = KFD_SYSFS_FILE_MODE
478 };
479 
480 static struct attribute attr_queue_gpuid = {
481 	.name = "gpuid",
482 	.mode = KFD_SYSFS_FILE_MODE
483 };
484 
485 static struct attribute *procfs_queue_attrs[] = {
486 	&attr_queue_size,
487 	&attr_queue_type,
488 	&attr_queue_gpuid,
489 	NULL
490 };
491 ATTRIBUTE_GROUPS(procfs_queue);
492 
493 static const struct sysfs_ops procfs_queue_ops = {
494 	.show = kfd_procfs_queue_show,
495 };
496 
497 static const struct kobj_type procfs_queue_type = {
498 	.sysfs_ops = &procfs_queue_ops,
499 	.default_groups = procfs_queue_groups,
500 };
501 
502 static const struct sysfs_ops procfs_stats_ops = {
503 	.show = kfd_procfs_stats_show,
504 };
505 
506 static const struct kobj_type procfs_stats_type = {
507 	.sysfs_ops = &procfs_stats_ops,
508 	.release = kfd_procfs_kobj_release,
509 };
510 
511 static const struct sysfs_ops sysfs_counters_ops = {
512 	.show = kfd_sysfs_counters_show,
513 };
514 
515 static const struct kobj_type sysfs_counters_type = {
516 	.sysfs_ops = &sysfs_counters_ops,
517 	.release = kfd_procfs_kobj_release,
518 };
519 
520 int kfd_procfs_add_queue(struct queue *q)
521 {
522 	struct kfd_process *proc;
523 	int ret;
524 
525 	if (!q || !q->process)
526 		return -EINVAL;
527 	proc = q->process;
528 
529 	/* Create proc/<pid>/queues/<queue id> folder */
530 	if (!proc->kobj_queues)
531 		return -EFAULT;
532 	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
533 			proc->kobj_queues, "%u", q->properties.queue_id);
534 	if (ret < 0) {
535 		pr_warn("Creating proc/<pid>/queues/%u failed",
536 			q->properties.queue_id);
537 		kobject_put(&q->kobj);
538 		return ret;
539 	}
540 
541 	return 0;
542 }
543 
544 static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
545 				 char *name)
546 {
547 	int ret;
548 
549 	if (!kobj || !attr || !name)
550 		return;
551 
552 	attr->name = name;
553 	attr->mode = KFD_SYSFS_FILE_MODE;
554 	sysfs_attr_init(attr);
555 
556 	ret = sysfs_create_file(kobj, attr);
557 	if (ret)
558 		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
559 }
560 
561 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
562 {
563 	int ret;
564 	int i;
565 	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
566 
567 	if (!p || !p->kobj)
568 		return;
569 
570 	/*
571 	 * Create sysfs files for each GPU:
572 	 * - proc/<pid>/stats_<gpuid>/
573 	 * - proc/<pid>/stats_<gpuid>/evicted_ms
574 	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
575 	 */
576 	for (i = 0; i < p->n_pdds; i++) {
577 		struct kfd_process_device *pdd = p->pdds[i];
578 
579 		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
580 				"stats_%u", pdd->dev->id);
581 		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
582 		if (!pdd->kobj_stats)
583 			return;
584 
585 		ret = kobject_init_and_add(pdd->kobj_stats,
586 					   &procfs_stats_type,
587 					   p->kobj,
588 					   "%s", stats_dir_filename);
589 
590 		if (ret) {
591 			pr_warn("Creating KFD proc/stats_%s folder failed",
592 				stats_dir_filename);
593 			kobject_put(pdd->kobj_stats);
594 			pdd->kobj_stats = NULL;
595 			return;
596 		}
597 
598 		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
599 				      "evicted_ms");
600 		/* Add sysfs file to report compute unit occupancy */
601 		if (pdd->dev->kfd2kgd->get_cu_occupancy)
602 			kfd_sysfs_create_file(pdd->kobj_stats,
603 					      &pdd->attr_cu_occupancy,
604 					      "cu_occupancy");
605 	}
606 }
607 
608 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
609 {
610 	int ret = 0;
611 	int i;
612 	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
613 
614 	if (!p || !p->kobj)
615 		return;
616 
617 	/*
618 	 * Create sysfs files for each GPU which supports SVM
619 	 * - proc/<pid>/counters_<gpuid>/
620 	 * - proc/<pid>/counters_<gpuid>/faults
621 	 * - proc/<pid>/counters_<gpuid>/page_in
622 	 * - proc/<pid>/counters_<gpuid>/page_out
623 	 */
624 	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
625 		struct kfd_process_device *pdd = p->pdds[i];
626 		struct kobject *kobj_counters;
627 
628 		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
629 			"counters_%u", pdd->dev->id);
630 		kobj_counters = kfd_alloc_struct(kobj_counters);
631 		if (!kobj_counters)
632 			return;
633 
634 		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
635 					   p->kobj, "%s", counters_dir_filename);
636 		if (ret) {
637 			pr_warn("Creating KFD proc/%s folder failed",
638 				counters_dir_filename);
639 			kobject_put(kobj_counters);
640 			return;
641 		}
642 
643 		pdd->kobj_counters = kobj_counters;
644 		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
645 				      "faults");
646 		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
647 				      "page_in");
648 		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
649 				      "page_out");
650 	}
651 }
652 
653 static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
654 {
655 	int i;
656 
657 	if (!p || !p->kobj)
658 		return;
659 
660 	/*
661 	 * Create sysfs files for each GPU:
662 	 * - proc/<pid>/vram_<gpuid>
663 	 * - proc/<pid>/sdma_<gpuid>
664 	 */
665 	for (i = 0; i < p->n_pdds; i++) {
666 		struct kfd_process_device *pdd = p->pdds[i];
667 
668 		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
669 			 pdd->dev->id);
670 		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
671 				      pdd->vram_filename);
672 
673 		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
674 			 pdd->dev->id);
675 		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
676 					    pdd->sdma_filename);
677 	}
678 }
679 
680 void kfd_procfs_del_queue(struct queue *q)
681 {
682 	if (!q || !q->process->kobj)
683 		return;
684 
685 	kobject_del(&q->kobj);
686 	kobject_put(&q->kobj);
687 }
688 
689 int kfd_process_create_wq(void)
690 {
691 	if (!kfd_process_wq)
692 		kfd_process_wq = alloc_workqueue("kfd_process_wq", WQ_UNBOUND,
693 						 0);
694 	if (!kfd_restore_wq)
695 		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq",
696 							 WQ_FREEZABLE);
697 
698 	if (!kfd_process_wq || !kfd_restore_wq) {
699 		kfd_process_destroy_wq();
700 		return -ENOMEM;
701 	}
702 
703 	return 0;
704 }
705 
706 void kfd_process_destroy_wq(void)
707 {
708 	if (kfd_process_wq) {
709 		destroy_workqueue(kfd_process_wq);
710 		kfd_process_wq = NULL;
711 	}
712 	if (kfd_restore_wq) {
713 		destroy_workqueue(kfd_restore_wq);
714 		kfd_restore_wq = NULL;
715 	}
716 }
717 
718 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
719 			struct kfd_process_device *pdd, void **kptr)
720 {
721 	struct kfd_node *dev = pdd->dev;
722 
723 	if (kptr && *kptr) {
724 		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
725 		*kptr = NULL;
726 	}
727 
728 	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
729 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
730 					       NULL);
731 }
732 
733 /* kfd_process_alloc_gpuvm - Allocate GPU memory in the process's GPU VM
734  *	This function should only be called right after the process
735  *	is created and while kfd_processes_mutex is still held,
736  *	to avoid concurrency. Because of that exclusivity, we do
737  *	not need to take p->mutex.
738  */
739 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
740 				   uint64_t gpu_va, uint32_t size,
741 				   uint32_t flags, struct kgd_mem **mem, void **kptr)
742 {
743 	struct kfd_node *kdev = pdd->dev;
744 	int err;
745 
746 	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
747 						 pdd->drm_priv, mem, NULL,
748 						 flags, false);
749 	if (err)
750 		goto err_alloc_mem;
751 
752 	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
753 			pdd->drm_priv);
754 	if (err)
755 		goto err_map_mem;
756 
757 	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
758 	if (err) {
759 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
760 		goto sync_memory_failed;
761 	}
762 
763 	if (kptr) {
764 		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
765 				(struct kgd_mem *)*mem, kptr, NULL);
766 		if (err) {
767 			pr_debug("Map GTT BO to kernel failed\n");
768 			goto sync_memory_failed;
769 		}
770 	}
771 
772 	return err;
773 
774 sync_memory_failed:
775 	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
776 
777 err_map_mem:
778 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
779 					       NULL);
780 err_alloc_mem:
781 	*mem = NULL;
782 	*kptr = NULL;
783 	return err;
784 }
785 
786 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
787  *	process for IB usage. The memory reserved is for KFD to submit
788  *	IBs to AMDGPU from kernel mode.  If the memory is reserved
789  *	successfully, ib_kaddr will have the CPU/kernel
790  *	address. Check ib_kaddr before accessing the memory.
791  */
792 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
793 {
794 	struct qcm_process_device *qpd = &pdd->qpd;
795 	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
796 			KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
797 			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
798 			KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
799 	struct kgd_mem *mem;
800 	void *kaddr;
801 	int ret;
802 
803 	if (qpd->ib_kaddr || !qpd->ib_base)
804 		return 0;
805 
806 	/* ib_base is only set for dGPU */
807 	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
808 				      &mem, &kaddr);
809 	if (ret)
810 		return ret;
811 
812 	qpd->ib_mem = mem;
813 	qpd->ib_kaddr = kaddr;
814 
815 	return 0;
816 }
817 
818 static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
819 {
820 	struct qcm_process_device *qpd = &pdd->qpd;
821 
822 	if (!qpd->ib_kaddr || !qpd->ib_base)
823 		return;
824 
825 	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
826 }
827 
828 int kfd_create_process_sysfs(struct kfd_process *process)
829 {
830 	struct kfd_process *primary_process;
831 	int ret;
832 
833 	if (process->kobj) {
834 		pr_warn("kobject already exists for the kfd_process\n");
835 		return -EINVAL;
836 	}
837 
838 	process->kobj = kfd_alloc_struct(process->kobj);
839 	if (!process->kobj) {
840 		pr_warn("Creating procfs kobject failed");
841 		return -ENOMEM;
842 	}
843 
844 	if (process->context_id == KFD_CONTEXT_ID_PRIMARY)
845 		ret = kobject_init_and_add(process->kobj, &procfs_type,
846 					   procfs.kobj, "%d",
847 					   (int)process->lead_thread->pid);
848 	else {
849 		primary_process = kfd_lookup_process_by_mm(process->lead_thread->mm);
850 		if (!primary_process)
851 			return -ESRCH;
852 
853 		ret = kobject_init_and_add(process->kobj, &procfs_type,
854 					   primary_process->kobj, "context_%u",
855 					   process->context_id);
856 		kfd_unref_process(primary_process);
857 	}
858 
859 	if (ret) {
860 		pr_warn("Creating procfs pid directory failed");
861 		kobject_put(process->kobj);
862 		process->kobj = NULL;
863 		return ret;
864 	}
865 
866 	kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
867 			      "pasid");
868 
869 	process->kobj_queues = kobject_create_and_add("queues",
870 						process->kobj);
871 	if (!process->kobj_queues)
872 		pr_warn("Creating KFD proc/queues folder failed");
873 
874 	kfd_procfs_add_sysfs_stats(process);
875 	kfd_procfs_add_sysfs_files(process);
876 	kfd_procfs_add_sysfs_counters(process);
877 
878 	return 0;
879 }
880 
881 static int kfd_process_alloc_id(struct kfd_process *process)
882 {
883 	int ret;
884 	struct kfd_process *primary_process;
885 
886 	/* KFD_CONTEXT_ID_PRIMARY (0xFFFF) was already assigned at creation */
887 	if (process->context_id == KFD_CONTEXT_ID_PRIMARY)
888 		return 0;
889 
890 	primary_process = kfd_lookup_process_by_mm(process->lead_thread->mm);
891 	if (!primary_process)
892 		return -ESRCH;
893 
894 	/* id range: KFD_CONTEXT_ID_MIN to 0xFFFE */
895 	ret = ida_alloc_range(&primary_process->id_table, KFD_CONTEXT_ID_MIN,
896 	      KFD_CONTEXT_ID_PRIMARY - 1, GFP_KERNEL);
897 	if (ret < 0)
898 		goto out;
899 
900 	process->context_id = ret;
901 	ret = 0;
902 
903 out:
904 	kfd_unref_process(primary_process);
905 
906 	return ret;
907 }
908 
909 static void kfd_process_free_id(struct kfd_process *process)
910 {
911 	struct kfd_process *primary_process;
912 
913 	if (process->context_id == KFD_CONTEXT_ID_PRIMARY)
914 		return;
915 
916 	primary_process = kfd_lookup_process_by_mm(process->lead_thread->mm);
917 	if (!primary_process)
918 		return;
919 
920 	ida_free(&primary_process->id_table, process->context_id);
921 
922 	kfd_unref_process(primary_process);
923 }
924 
925 struct kfd_process *kfd_create_process(struct task_struct *thread)
926 {
927 	struct kfd_process *process;
928 	int ret;
929 
930 	if (!(thread->mm && mmget_not_zero(thread->mm)))
931 		return ERR_PTR(-EINVAL);
932 
933 	/* If the process just called exec(3), it is possible that the
934 	 * cleanup of the kfd_process (following the release of the mm
935 	 * of the old process image) is still in the cleanup work queue.
936 	 * Make sure to drain any job before trying to recreate any
937 	 * resource for this process.
938 	 */
939 	flush_workqueue(kfd_process_wq);
940 
941 	/*
942 	 * Take the kfd_processes_mutex before starting process creation
943 	 * so there won't be a case where two threads of the same process
944 	 * create two kfd_process structures.
945 	 */
946 	mutex_lock(&kfd_processes_mutex);
947 
948 	if (kfd_gpu_node_num() <= 0) {
949 		pr_warn("no gpu node! Cannot create KFD process");
950 		process = ERR_PTR(-EINVAL);
951 		goto out;
952 	}
953 
954 	if (kfd_is_locked(NULL)) {
955 		pr_debug("KFD is locked! Cannot create process");
956 		process = ERR_PTR(-EINVAL);
957 		goto out;
958 	}
959 
960 	/* A prior open of /dev/kfd could have already created the process.
961 	 * find_process will increase process kref in this case
962 	 */
963 	process = find_process(thread, true);
964 	if (process) {
965 		pr_debug("Process already found\n");
966 	} else {
967 		process = create_process(thread, true);
968 		if (IS_ERR(process))
969 			goto out;
970 
971 		if (!procfs.kobj)
972 			goto out;
973 
974 		ret = kfd_create_process_sysfs(process);
975 		if (ret)
976 			pr_warn("Failed to create sysfs entry for the kfd_process");
977 
978 		kfd_debugfs_add_process(process);
979 
980 		init_waitqueue_head(&process->wait_irq_drain);
981 	}
982 out:
983 	mutex_unlock(&kfd_processes_mutex);
984 	mmput(thread->mm);
985 
986 	return process;
987 }
988 
989 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
990 {
991 	struct kfd_process *process;
992 
993 	hash_for_each_possible_rcu(kfd_processes_table, process,
994 					kfd_processes, (uintptr_t)mm)
995 		if (process->mm == mm && process->context_id == KFD_CONTEXT_ID_PRIMARY)
996 			return process;
997 
998 	return NULL;
999 }
1000 
1001 static struct kfd_process *find_process(const struct task_struct *thread,
1002 					bool ref)
1003 {
1004 	struct kfd_process *p;
1005 	int idx;
1006 
1007 	idx = srcu_read_lock(&kfd_processes_srcu);
1008 	p = find_process_by_mm(thread->mm);
1009 	if (p && ref)
1010 		kref_get(&p->ref);
1011 	srcu_read_unlock(&kfd_processes_srcu, idx);
1012 
1013 	return p;
1014 }
1015 
1016 void kfd_unref_process(struct kfd_process *p)
1017 {
1018 	kref_put(&p->ref, kfd_process_ref_release);
1019 }
1020 
1021 /* This increments the process->ref counter. */
1022 struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
1023 {
1024 	struct task_struct *task = NULL;
1025 	struct kfd_process *p    = NULL;
1026 
1027 	if (!pid) {
1028 		task = current;
1029 		get_task_struct(task);
1030 	} else {
1031 		task = get_pid_task(pid, PIDTYPE_PID);
1032 	}
1033 
1034 	if (task) {
1035 		p = find_process(task, true);
1036 		put_task_struct(task);
1037 	}
1038 
1039 	return p;
1040 }
1041 
1042 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
1043 {
1044 	struct kfd_process *p = pdd->process;
1045 	void *mem;
1046 	int id;
1047 	int i;
1048 
1049 	/*
1050 	 * Remove all handles from idr and release appropriate
1051 	 * local memory object
1052 	 */
1053 	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1054 
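		/*
		 * Unmap the BO from every GPU VM it was mapped into before
		 * freeing it on the GPU that allocated it.
		 */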
1055 		for (i = 0; i < p->n_pdds; i++) {
1056 			struct kfd_process_device *peer_pdd = p->pdds[i];
1057 
1058 			if (!peer_pdd->drm_priv)
1059 				continue;
1060 			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1061 				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
1062 		}
1063 
1064 		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
1065 						       pdd->drm_priv, NULL);
1066 		kfd_process_device_remove_obj_handle(pdd, id);
1067 	}
1068 }
1069 
1070 /*
1071  * Just kunmap and unpin signal BO here. It will be freed in
1072  * kfd_process_free_outstanding_kfd_bos()
1073  */
1074 static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
1075 {
1076 	struct kfd_process_device *pdd;
1077 	struct kfd_node *kdev;
1078 	void *mem;
1079 
1080 	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
1081 	if (!kdev)
1082 		return;
1083 
1084 	mutex_lock(&p->mutex);
1085 
1086 	pdd = kfd_get_process_device_data(kdev, p);
1087 	if (!pdd)
1088 		goto out;
1089 
1090 	mem = kfd_process_device_translate_handle(
1091 		pdd, GET_IDR_HANDLE(p->signal_handle));
1092 	if (!mem)
1093 		goto out;
1094 
1095 	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
1096 
1097 out:
1098 	mutex_unlock(&p->mutex);
1099 }
1100 
1101 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
1102 {
1103 	int i;
1104 
1105 	for (i = 0; i < p->n_pdds; i++)
1106 		kfd_process_device_free_bos(p->pdds[i]);
1107 }
1108 
1109 static void kfd_process_destroy_pdds(struct kfd_process *p)
1110 {
1111 	int i;
1112 
1113 	for (i = 0; i < p->n_pdds; i++) {
1114 		struct kfd_process_device *pdd = p->pdds[i];
1115 
1116 		kfd_smi_event_process(pdd, false);
1117 
1118 		pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
1119 			pdd->dev->id, p->lead_thread->pid);
1120 		kfd_process_device_destroy_cwsr_dgpu(pdd);
1121 		kfd_process_device_destroy_ib_mem(pdd);
1122 
1123 		if (pdd->drm_file)
1124 			fput(pdd->drm_file);
1125 
1126 		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
1127 			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
1128 				get_order(KFD_CWSR_TBA_TMA_SIZE));
1129 
1130 		idr_destroy(&pdd->alloc_idr);
1131 
1132 		kfd_free_process_doorbells(pdd->dev->kfd, pdd);
1133 
1134 		if (pdd->dev->kfd->shared_resources.enable_mes &&
1135 			pdd->proc_ctx_cpu_ptr)
1136 			amdgpu_amdkfd_free_kernel_mem(pdd->dev->adev,
1137 						   &pdd->proc_ctx_bo);
1138 		/*
1139 		 * before destroying pdd, make sure to report availability
1140 		 * for auto suspend
1141 		 */
1142 		if (pdd->runtime_inuse) {
1143 			pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
1144 			pdd->runtime_inuse = false;
1145 		}
1146 
1147 		atomic_dec(&pdd->dev->kfd->kfd_processes_count);
1148 
1149 		kfree(pdd);
1150 		p->pdds[i] = NULL;
1151 	}
1152 	p->n_pdds = 0;
1153 }
1154 
1155 static void kfd_process_remove_sysfs(struct kfd_process *p)
1156 {
1157 	struct kfd_process_device *pdd;
1158 	int i;
1159 
1160 	if (!p->kobj)
1161 		return;
1162 
1163 	sysfs_remove_file(p->kobj, &p->attr_pasid);
1164 	kobject_del(p->kobj_queues);
1165 	kobject_put(p->kobj_queues);
1166 	p->kobj_queues = NULL;
1167 
1168 	for (i = 0; i < p->n_pdds; i++) {
1169 		pdd = p->pdds[i];
1170 
1171 		sysfs_remove_file(p->kobj, &pdd->attr_vram);
1172 		sysfs_remove_file(p->kobj, &pdd->attr_sdma);
1173 
1174 		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
1175 		if (pdd->dev->kfd2kgd->get_cu_occupancy)
1176 			sysfs_remove_file(pdd->kobj_stats,
1177 					  &pdd->attr_cu_occupancy);
1178 		kobject_del(pdd->kobj_stats);
1179 		kobject_put(pdd->kobj_stats);
1180 		pdd->kobj_stats = NULL;
1181 	}
1182 
1183 	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
1184 		pdd = p->pdds[i];
1185 
1186 		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
1187 		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
1188 		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
1189 		kobject_del(pdd->kobj_counters);
1190 		kobject_put(pdd->kobj_counters);
1191 		pdd->kobj_counters = NULL;
1192 	}
1193 
1194 	kobject_del(p->kobj);
1195 	kobject_put(p->kobj);
1196 	p->kobj = NULL;
1197 }
1198 
1199 /*
1200  * If any GPU has a reset in progress, wait for the reset to complete.
1201  */
1202 static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p)
1203 {
1204 	int i;
1205 
1206 	for (i = 0; i < p->n_pdds; i++)
1207 		flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq);
1208 }
1209 
1210 /* No process locking is needed in this function, because the process
1211  * is not findable any more. We must assume that no other thread is
1212  * using it any more, otherwise we couldn't safely free the process
1213  * structure in the end.
1214  */
1215 static void kfd_process_wq_release(struct work_struct *work)
1216 {
1217 	struct kfd_process *p = container_of(work, struct kfd_process,
1218 					     release_work);
1219 	struct dma_fence *ef;
1220 
1221 	/*
1222 	 * If a GPU is in reset, user queues may still be running; wait for the reset to complete.
1223 	 */
1224 	kfd_process_wait_gpu_reset_complete(p);
1225 
1226 	/* Signal the eviction fence after user mode queues are
1227 	 * destroyed. This allows any BOs to be freed without
1228 	 * triggering pointless evictions or waiting for fences.
1229 	 */
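	/* Wait for outstanding RCU readers of p->ef before accessing and
	 * signaling the fence directly.
	 */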
1230 	synchronize_rcu();
1231 	ef = rcu_access_pointer(p->ef);
1232 	if (ef)
1233 		dma_fence_signal(ef);
1234 
1235 	if (p->context_id != KFD_CONTEXT_ID_PRIMARY)
1236 		kfd_process_free_id(p);
1237 	else
1238 		ida_destroy(&p->id_table);
1239 
1240 	kfd_debugfs_remove_process(p);
1241 
1242 	kfd_process_kunmap_signal_bo(p);
1243 	kfd_process_free_outstanding_kfd_bos(p);
1244 	svm_range_list_fini(p);
1245 
1246 	kfd_process_destroy_pdds(p);
1247 	dma_fence_put(ef);
1248 
1249 	kfd_event_free_process(p);
1250 
1251 	mutex_destroy(&p->mutex);
1252 
1253 	put_task_struct(p->lead_thread);
1254 
1255 	/* the last step is removing process entries under /sys
1256 	 * to indicate the process has been terminated.
1257 	 */
1258 	kfd_process_remove_sysfs(p);
1259 
1260 	kfree(p);
1261 }
1262 
1263 static void kfd_process_ref_release(struct kref *ref)
1264 {
1265 	struct kfd_process *p = container_of(ref, struct kfd_process, ref);
1266 
1267 	INIT_WORK(&p->release_work, kfd_process_wq_release);
1268 	queue_work(kfd_process_wq, &p->release_work);
1269 }
1270 
1271 static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
1272 {
1273 	/* This increments p->ref counter if kfd process p exists */
1274 	struct kfd_process *p = kfd_lookup_process_by_mm(mm);
1275 
1276 	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
1277 }
1278 
1279 static void kfd_process_free_notifier(struct mmu_notifier *mn)
1280 {
1281 	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
1282 }
1283 
1284 static void kfd_process_table_remove(struct kfd_process *p)
1285 {
1286 	mutex_lock(&kfd_processes_mutex);
1287 	/*
1288 	 * Return early if the table is empty.
1289 	 *
1290 	 * This could potentially happen if this function is called concurrently
1291 	 * by the mmu_notifier and by kfd_cleanup_processes.
1292 	 *
1293 	 */
1294 	if (hash_empty(kfd_processes_table)) {
1295 		mutex_unlock(&kfd_processes_mutex);
1296 		return;
1297 	}
1298 	hash_del_rcu(&p->kfd_processes);
1299 	mutex_unlock(&kfd_processes_mutex);
1300 	synchronize_srcu(&kfd_processes_srcu);
1301 }
1302 
1303 void kfd_process_notifier_release_internal(struct kfd_process *p)
1304 {
1305 	int i;
1306 
1307 	kfd_process_table_remove(p);
1308 	cancel_delayed_work_sync(&p->eviction_work);
1309 	cancel_delayed_work_sync(&p->restore_work);
1310 
1311 	/*
1312 	 * Dequeue and destroy user queues. It is not safe for the GPU to access
1313 	 * system memory after the mmu release notifier callback returns, because
1314 	 * exit_mmap frees the process memory afterwards.
1315 	 */
1316 	kfd_process_dequeue_from_all_devices(p);
1317 	pqm_uninit(&p->pqm);
1318 
1319 	for (i = 0; i < p->n_pdds; i++) {
1320 		struct kfd_process_device *pdd = p->pdds[i];
1321 
1322 		/* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */
1323 		if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
1324 			amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
1325 	}
1326 
1327 	/* Indicate to other users that MM is no longer valid */
1328 	p->mm = NULL;
1329 	kfd_dbg_trap_disable(p);
1330 
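	/*
	 * If this process was acting as a debugger, detach it from any
	 * processes it was still debugging.
	 */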
1331 	if (atomic_read(&p->debugged_process_count) > 0) {
1332 		struct kfd_process *target;
1333 		unsigned int temp;
1334 		int idx = srcu_read_lock(&kfd_processes_srcu);
1335 
1336 		hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
1337 			if (target->debugger_process && target->debugger_process == p) {
1338 				mutex_lock_nested(&target->mutex, 1);
1339 				kfd_dbg_trap_disable(target);
1340 				mutex_unlock(&target->mutex);
1341 				if (atomic_read(&p->debugged_process_count) == 0)
1342 					break;
1343 			}
1344 		}
1345 
1346 		srcu_read_unlock(&kfd_processes_srcu, idx);
1347 	}
1348 
1349 	if (p->context_id == KFD_CONTEXT_ID_PRIMARY)
1350 		mmu_notifier_put(&p->mmu_notifier);
1351 }
1352 
1353 static void kfd_process_notifier_release(struct mmu_notifier *mn,
1354 					struct mm_struct *mm)
1355 {
1356 	struct kfd_process *p;
1357 
1358 	/*
1359 	 * The kfd_process structure cannot be freed because the
1360 	 * mmu_notifier srcu is read locked.
1361 	 */
1362 	p = container_of(mn, struct kfd_process, mmu_notifier);
1363 	if (WARN_ON(p->mm != mm))
1364 		return;
1365 
1366 	kfd_process_notifier_release_internal(p);
1367 }
1368 
1369 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
1370 	.release = kfd_process_notifier_release,
1371 	.alloc_notifier = kfd_process_alloc_notifier,
1372 	.free_notifier = kfd_process_free_notifier,
1373 };
1374 
1375 /*
1376  * This code handles the case when the driver is being unloaded before all
1377  * mm_structs are released. We need to safely free the kfd_process and
1378  * avoid race conditions with the mmu_notifier that might try to free them.
1379  *
1380  */
1381 void kfd_cleanup_processes(void)
1382 {
1383 	struct kfd_process *p;
1384 	struct hlist_node *p_temp;
1385 	unsigned int temp;
1386 	HLIST_HEAD(cleanup_list);
1387 
1388 	/*
1389 	 * Move all remaining kfd_processes from the process table to a
1390 	 * temp list for processing. Once done, the callback from mmu_notifier
1391 	 * release will not find the kfd_process in the table and will return
1392 	 * early, avoiding double-free issues.
1393 	 */
1394 	mutex_lock(&kfd_processes_mutex);
1395 	hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
1396 		hash_del_rcu(&p->kfd_processes);
1397 		synchronize_srcu(&kfd_processes_srcu);
1398 		hlist_add_head(&p->kfd_processes, &cleanup_list);
1399 	}
1400 	mutex_unlock(&kfd_processes_mutex);
1401 
1402 	hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
1403 		kfd_process_notifier_release_internal(p);
1404 
1405 	/*
1406 	 * Ensure that all outstanding free_notifier callbacks get called,
1407 	 * triggering the release of the kfd_process struct.
1408 	 */
1409 	mmu_notifier_synchronize();
1410 }
1411 
1412 int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
1413 {
1414 	unsigned long  offset;
1415 	int i;
1416 
1417 	if (p->has_cwsr)
1418 		return 0;
1419 
1420 	for (i = 0; i < p->n_pdds; i++) {
1421 		struct kfd_node *dev = p->pdds[i]->dev;
1422 		struct qcm_process_device *qpd = &p->pdds[i]->qpd;
1423 
1424 		if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
1425 			continue;
1426 
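		/* Encode the mmap type and GPU id in the offset so the KFD
		 * mmap handler maps this GPU's reserved CWSR area.
		 */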
1427 		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
1428 		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
1429 			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
1430 			MAP_SHARED, offset);
1431 
1432 		if (IS_ERR_VALUE(qpd->tba_addr)) {
1433 			int err = qpd->tba_addr;
1434 
1435 			dev_err(dev->adev->dev,
1436 				"Failure to set tba address. error %d.\n", err);
1437 			qpd->tba_addr = 0;
1438 			qpd->cwsr_kaddr = NULL;
1439 			return err;
1440 		}
1441 
1442 		memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1443 
1444 		kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
1445 
1446 		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1447 		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1448 			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1449 	}
1450 
1451 	p->has_cwsr = true;
1452 
1453 	return 0;
1454 }
1455 
1456 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
1457 {
1458 	struct kfd_node *dev = pdd->dev;
1459 	struct qcm_process_device *qpd = &pdd->qpd;
1460 	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
1461 			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
1462 			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1463 	struct kgd_mem *mem;
1464 	void *kaddr;
1465 	int ret;
1466 
1467 	if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
1468 		return 0;
1469 
1470 	/* cwsr_base is only set for dGPU */
1471 	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
1472 				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
1473 	if (ret)
1474 		return ret;
1475 
1476 	qpd->cwsr_mem = mem;
1477 	qpd->cwsr_kaddr = kaddr;
1478 	qpd->tba_addr = qpd->cwsr_base;
1479 
1480 	memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1481 
1482 	kfd_process_set_trap_debug_flag(&pdd->qpd,
1483 					pdd->process->debug_trap_enabled);
1484 
1485 	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1486 	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1487 		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1488 
1489 	return 0;
1490 }
1491 
1492 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
1493 {
1494 	struct kfd_node *dev = pdd->dev;
1495 	struct qcm_process_device *qpd = &pdd->qpd;
1496 
1497 	if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
1498 		return;
1499 
1500 	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
1501 }
1502 
1503 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
1504 				  uint64_t tba_addr,
1505 				  uint64_t tma_addr)
1506 {
1507 	if (qpd->cwsr_kaddr) {
1508 		/* KFD trap handler is bound, record as second-level TBA/TMA
1509 		 * in first-level TMA. First-level trap will jump to second.
1510 		 */
1511 		uint64_t *tma =
1512 			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1513 		tma[0] = tba_addr;
1514 		tma[1] = tma_addr;
1515 	} else {
1516 		/* No trap handler bound, bind as first-level TBA/TMA. */
1517 		qpd->tba_addr = tba_addr;
1518 		qpd->tma_addr = tma_addr;
1519 	}
1520 }
1521 
1522 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
1523 {
1524 	int i;
1525 
1526 	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
1527 	 * boot time retry setting. Mixing processes with different
1528 	 * XNACK/retry settings can hang the GPU.
1529 	 *
1530 	 * Different GPUs can have different noretry settings depending
1531 	 * on HW bugs or limitations. We need to find at least one
1532 	 * XNACK mode for this process that's compatible with all GPUs.
1533 	 * Fortunately GPUs with retry enabled (noretry=0) can run code
1534 	 * built for XNACK-off. On GFXv9 it may perform slower.
1535 	 *
1536 	 * Therefore applications built for XNACK-off can always be
1537 	 * supported and will be our fallback if any GPU does not
1538 	 * support retry.
1539 	 */
1540 	for (i = 0; i < p->n_pdds; i++) {
1541 		struct kfd_node *dev = p->pdds[i]->dev;
1542 
1543 		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
1544 		 * support the SVM APIs and don't need to be considered
1545 		 * for the XNACK mode selection.
1546 		 */
1547 		if (!KFD_IS_SOC15(dev))
1548 			continue;
1549 		/* Aldebaran can always support XNACK because it can support
1550 		 * per-process XNACK mode selection. But let the dev->noretry
1551 		 * setting still influence the default XNACK mode.
1552 		 */
1553 		if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) {
1554 			if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {
1555 				pr_debug("SRIOV platform xnack not supported\n");
1556 				return false;
1557 			}
1558 			continue;
1559 		}
1560 
1561 		/* GFXv10 and later GPUs do not support shader preemption
1562 		 * during page faults. This can lead to poor QoS for queue
1563 		 * management and memory-manager-related preemptions or
1564 		 * even deadlocks.
1565 		 */
1566 		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1) &&
1567 		    KFD_GC_VERSION(dev) < IP_VERSION(12, 1, 0))
1568 			return false;
1569 
1570 		if (dev->kfd->noretry)
1571 			return false;
1572 	}
1573 
1574 	return true;
1575 }
1576 
1577 void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
1578 				     bool enabled)
1579 {
1580 	if (qpd->cwsr_kaddr) {
1581 		uint64_t *tma =
1582 			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
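		/* The third TMA entry carries the debug-trap enable flag for
		 * the trap handler.
		 */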
1583 		tma[2] = enabled;
1584 	}
1585 }
1586 
1587 /*
1588  * On return the kfd_process is fully operational and will be freed when the
1589  * mm is released
1590  */
1591 struct kfd_process *create_process(const struct task_struct *thread, bool primary)
1592 {
1593 	struct kfd_process *process;
1594 	struct mmu_notifier *mn;
1595 	int err = -ENOMEM;
1596 
1597 	process = kzalloc_obj(*process);
1598 	if (!process)
1599 		goto err_alloc_process;
1600 
1601 	kref_init(&process->ref);
1602 	mutex_init(&process->mutex);
1603 	process->mm = thread->mm;
1604 	process->lead_thread = thread->group_leader;
1605 	process->n_pdds = 0;
1606 	process->queues_paused = false;
1607 
1608 	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
1609 	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
1610 	process->last_restore_timestamp = get_jiffies_64();
1611 	err = kfd_event_init_process(process);
1612 	if (err)
1613 		goto err_event_init;
1614 	process->is_32bit_user_mode = in_compat_syscall();
1615 	process->debug_trap_enabled = false;
1616 	process->debugger_process = NULL;
1617 	process->exception_enable_mask = 0;
1618 	atomic_set(&process->debugged_process_count, 0);
1619 	sema_init(&process->runtime_enable_sema, 0);
1620 
1621 	err = pqm_init(&process->pqm, process);
1622 	if (err != 0)
1623 		goto err_process_pqm_init;
1624 
1625 	/* Init process apertures */
1626 	err = kfd_init_apertures(process);
1627 	if (err != 0)
1628 		goto err_init_apertures;
1629 
1630 	/* Check XNACK support after PDDs are created in kfd_init_apertures */
1631 	process->xnack_enabled = kfd_process_xnack_mode(process, false);
1632 
1633 	err = svm_range_list_init(process);
1634 	if (err)
1635 		goto err_init_svm_range_list;
1636 
1637 	/* alloc_notifier needs to find the process in the hash table */
1638 	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
1639 			(uintptr_t)process->mm);
1640 
1641 	/* Prevent free_notifier from starting kfd_process_wq_release if
1642 	 * mmu_notifier_get fails because of a pending signal.
1643 	 */
1644 	kref_get(&process->ref);
1645 
1646 	/* MMU notifier registration must be the last call that can fail
1647 	 * because after this point we cannot unwind the process creation.
1648 	 * After this point, mmu_notifier_put will trigger the cleanup by
1649 	 * dropping the last process reference in the free_notifier.
1650 	 */
1651 	if (primary) {
1652 		process->context_id = KFD_CONTEXT_ID_PRIMARY;
1653 		mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
1654 		if (IS_ERR(mn)) {
1655 			err = PTR_ERR(mn);
1656 			goto err_register_notifier;
1657 		}
1658 		BUG_ON(mn != &process->mmu_notifier);
1659 		ida_init(&process->id_table);
1660 	}
1661 
1662 	err = kfd_process_alloc_id(process);
1663 	if (err) {
1664 		pr_err("Creating kfd process: failed to alloc an id\n");
1665 		goto err_alloc_id;
1666 	}
1667 
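	/* Drop the extra reference taken above; process creation can no longer fail */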
1668 	kfd_unref_process(process);
1669 	get_task_struct(process->lead_thread);
1670 
1671 	INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
1672 
1673 	return process;
1674 
1675 err_alloc_id:
1676 	kfd_process_free_id(process);
1677 err_register_notifier:
1678 	hash_del_rcu(&process->kfd_processes);
1679 	svm_range_list_fini(process);
1680 err_init_svm_range_list:
1681 	kfd_process_free_outstanding_kfd_bos(process);
1682 	kfd_process_destroy_pdds(process);
1683 err_init_apertures:
1684 	pqm_uninit(&process->pqm);
1685 err_process_pqm_init:
1686 	kfd_event_free_process(process);
1687 err_event_init:
1688 	mutex_destroy(&process->mutex);
1689 	kfree(process);
1690 err_alloc_process:
1691 	return ERR_PTR(err);
1692 }
1693 
1694 struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
1695 							struct kfd_process *p)
1696 {
1697 	int i;
1698 
1699 	for (i = 0; i < p->n_pdds; i++)
1700 		if (p->pdds[i]->dev == dev)
1701 			return p->pdds[i];
1702 
1703 	return NULL;
1704 }
1705 
1706 struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
1707 							struct kfd_process *p)
1708 {
1709 	struct kfd_process_device *pdd = NULL;
1710 
1711 	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
1712 		return NULL;
1713 	pdd = kzalloc_obj(*pdd);
1714 	if (!pdd)
1715 		return NULL;
1716 
1717 	pdd->dev = dev;
1718 	INIT_LIST_HEAD(&pdd->qpd.queues_list);
1719 	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
1720 	pdd->qpd.dqm = dev->dqm;
1721 	pdd->qpd.pqm = &p->pqm;
1722 	pdd->qpd.evicted = 0;
1723 	pdd->qpd.mapped_gws_queue = false;
1724 	pdd->process = p;
1725 	pdd->bound = PDD_UNBOUND;
1726 	pdd->already_dequeued = false;
1727 	pdd->runtime_inuse = false;
1728 	atomic64_set(&pdd->vram_usage, 0);
1729 	pdd->sdma_past_activity_counter = 0;
1730 	pdd->user_gpu_id = dev->id;
1731 	atomic64_set(&pdd->evict_duration_counter, 0);
1732 
1733 	p->pdds[p->n_pdds++] = pdd;
1734 	if (kfd_dbg_is_per_vmid_supported(pdd->dev))
1735 		pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
1736 							pdd->dev->adev,
1737 							false,
1738 							0);
1739 
1740 	/* Init idr used for memory handle translation */
1741 	idr_init(&pdd->alloc_idr);
1742 
1743 	atomic_inc(&dev->kfd->kfd_processes_count);
1744 
1745 	return pdd;
1746 }
1747 
1748 /**
1749  * kfd_process_device_init_vm - Initialize a VM for a process-device
1750  *
1751  * @pdd: The process-device
1752  * @drm_file: Optional pointer to a DRM file descriptor
1753  *
1754  * If @drm_file is specified, it will be used to acquire the VM from
1755  * that file descriptor. If successful, the @pdd takes ownership of
1756  * the file descriptor.
1757  *
1758  * If @drm_file is NULL, a new VM is created.
1759  *
1760  * Returns 0 on success, -errno on failure.
1761  */
1762 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1763 			       struct file *drm_file)
1764 {
1765 	struct amdgpu_fpriv *drv_priv;
1766 	struct amdgpu_vm *avm;
1767 	struct kfd_process *p;
1768 	struct dma_fence *ef;
1769 	struct kfd_node *dev;
1770 	int ret;
1771 
1772 	if (pdd->drm_priv)
1773 		return -EBUSY;
1774 
1775 	ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
1776 	if (ret)
1777 		return ret;
1778 	avm = &drv_priv->vm;
1779 
1780 	p = pdd->process;
1781 	dev = pdd->dev;
1782 
1783 	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
1784 						     &p->kgd_process_info,
1785 						     p->ef ? NULL : &ef);
1786 	if (ret) {
1787 		dev_err(dev->adev->dev, "Failed to create process VM object\n");
1788 		return ret;
1789 	}
1790 
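	/* The process eviction fence is created only by the first VM
	 * acquisition; subsequent GPUs share the same fence.
	 */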
1791 	if (!p->ef)
1792 		RCU_INIT_POINTER(p->ef, ef);
1793 
1794 	pdd->drm_priv = drm_file->private_data;
1795 
1796 	ret = kfd_process_device_reserve_ib_mem(pdd);
1797 	if (ret)
1798 		goto err_reserve_ib_mem;
1799 	ret = kfd_process_device_init_cwsr_dgpu(pdd);
1800 	if (ret)
1801 		goto err_init_cwsr;
1802 
1803 	if (unlikely(!avm->pasid)) {
1804 		dev_warn(pdd->dev->adev->dev, "WARN: vm %p has no pasid associated",
1805 				 avm);
1806 		ret = -EINVAL;
1807 		goto err_get_pasid;
1808 	}
1809 
1810 	pdd->pasid = avm->pasid;
1811 	pdd->drm_file = drm_file;
1812 
1813 	kfd_smi_event_process(pdd, true);
1814 
1815 	return 0;
1816 
1817 err_get_pasid:
1818 	kfd_process_device_destroy_cwsr_dgpu(pdd);
1819 err_init_cwsr:
1820 	kfd_process_device_destroy_ib_mem(pdd);
1821 err_reserve_ib_mem:
1822 	pdd->drm_priv = NULL;
1823 	amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
1824 
1825 	return ret;
1826 }
1827 
1828 /*
1829  * Direct the IOMMU to bind the process (specifically the pasid->mm)
1830  * to the device.
1831  * Unbinding occurs when the process dies or the device is removed.
1832  *
1833  * Assumes that the process lock is held.
1834  */
1835 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
1836 							struct kfd_process *p)
1837 {
1838 	struct kfd_process_device *pdd;
1839 	int err;
1840 
1841 	pdd = kfd_get_process_device_data(dev, p);
1842 	if (!pdd) {
1843 		dev_err(dev->adev->dev, "Process device data doesn't exist\n");
1844 		return ERR_PTR(-ENOMEM);
1845 	}
1846 
1847 	if (!pdd->drm_priv)
1848 		return ERR_PTR(-ENODEV);
1849 
1850 	/*
1851 	 * signal runtime-pm system to auto resume and prevent
1852 	 * further runtime suspend once device pdd is created until
1853 	 * pdd is destroyed.
1854 	 */
1855 	if (!pdd->runtime_inuse) {
1856 		err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
1857 		if (err < 0) {
1858 			pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1859 			return ERR_PTR(err);
1860 		}
1861 	}
1862 
1863 	/*
1864 	 * make sure that runtime_usage counter is incremented just once
1865 	 * per pdd
1866 	 */
1867 	pdd->runtime_inuse = true;
1868 
1869 	return pdd;
1870 }
1871 
1872 /* Create specific handle mapped to mem from process local memory idr
1873  * Assumes that the process lock is held.
1874  */
1875 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1876 					void *mem)
1877 {
1878 	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
1879 }
1880 
1881 /* Translate specific handle from process local memory idr
1882  * Assumes that the process lock is held.
1883  */
1884 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
1885 					int handle)
1886 {
1887 	if (handle < 0)
1888 		return NULL;
1889 
1890 	return idr_find(&pdd->alloc_idr, handle);
1891 }
1892 
1893 /* Remove specific handle from process local memory idr
1894  * Assumes that the process lock is held.
1895  */
1896 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1897 					int handle)
1898 {
1899 	if (handle >= 0)
1900 		idr_remove(&pdd->alloc_idr, handle);
1901 }
1902 
1903 static struct kfd_process_device *kfd_lookup_process_device_by_pasid(u32 pasid)
1904 {
1905 	struct kfd_process_device *ret_p = NULL;
1906 	struct kfd_process *p;
1907 	unsigned int temp;
1908 	int i;
1909 
1910 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1911 		for (i = 0; i < p->n_pdds; i++) {
1912 			if (p->pdds[i]->pasid == pasid) {
1913 				ret_p = p->pdds[i];
1914 				break;
1915 			}
1916 		}
1917 		if (ret_p)
1918 			break;
1919 	}
1920 	return ret_p;
1921 }
1922 
1923 /* This increments the process->ref counter. */
1924 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
1925 						struct kfd_process_device **pdd)
1926 {
1927 	struct kfd_process_device *ret_p;
1928 
1929 	int idx = srcu_read_lock(&kfd_processes_srcu);
1930 
1931 	ret_p = kfd_lookup_process_device_by_pasid(pasid);
1932 	if (ret_p) {
1933 		if (pdd)
1934 			*pdd = ret_p;
1935 		kref_get(&ret_p->process->ref);
1936 
1937 		srcu_read_unlock(&kfd_processes_srcu, idx);
1938 		return ret_p->process;
1939 	}
1940 
1941 	srcu_read_unlock(&kfd_processes_srcu, idx);
1942 
1943 	if (pdd)
1944 		*pdd = NULL;
1945 
1946 	return NULL;
1947 }
1948 
1949 /* This increments the process->ref counter. */
1950 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
1951 {
1952 	struct kfd_process *p;
1953 
1954 	int idx = srcu_read_lock(&kfd_processes_srcu);
1955 
1956 	p = find_process_by_mm(mm);
1957 	if (p)
1958 		kref_get(&p->ref);
1959 
1960 	srcu_read_unlock(&kfd_processes_srcu, idx);
1961 
1962 	return p;
1963 }
1964 
1965 /* This increments the process->ref counter. */
1966 struct kfd_process *kfd_lookup_process_by_id(const struct mm_struct *mm, u16 id)
1967 {
1968 	struct kfd_process *p, *ret_p = NULL;
1969 	unsigned int temp;
1970 
1971 	int idx = srcu_read_lock(&kfd_processes_srcu);
1972 
1973 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1974 		if (p->mm == mm && p->context_id == id) {
1975 			kref_get(&p->ref);
1976 			ret_p = p;
1977 			break;
1978 		}
1979 	}
1980 
1981 	srcu_read_unlock(&kfd_processes_srcu, idx);
1982 
1983 	return ret_p;
1984 }
1985 
1986 /* kfd_process_evict_queues - Evict all user queues of a process
1987  *
1988  * Eviction is reference-counted per process-device. This means multiple
1989  * evictions from different sources can be nested safely.
1990  */
1991 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
1992 {
1993 	int r = 0;
1994 	int i;
1995 	unsigned int n_evicted = 0;
1996 
1997 	for (i = 0; i < p->n_pdds; i++) {
1998 		struct kfd_process_device *pdd = p->pdds[i];
1999 		struct device *dev = pdd->dev->adev->dev;
2000 
2001 		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
2002 					     trigger);
2003 
2004 		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
2005 							    &pdd->qpd);
		/* evict_process_queues returns -EIO if HWS is hung or the ASIC
		 * is resetting. In that case, keep all queues marked as evicted
		 * so they are not added back, since their state was not actually
		 * saved.
		 */
2010 		if (r && r != -EIO) {
2011 			dev_err(dev, "Failed to evict process queues\n");
2012 			goto fail;
2013 		}
		n_evicted++;
2017 	}
2018 
2019 	return r;
2020 
2021 fail:
2022 	/* To keep state consistent, roll back partial eviction by
2023 	 * restoring queues
2024 	 */
2025 	for (i = 0; i < p->n_pdds; i++) {
2026 		struct kfd_process_device *pdd = p->pdds[i];
2027 
2028 		if (n_evicted == 0)
2029 			break;
2030 
2031 		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
2032 
2033 		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
2034 							      &pdd->qpd))
2035 			dev_err(pdd->dev->adev->dev,
2036 				"Failed to restore queues\n");
2037 
2038 		n_evicted--;
2039 	}
2040 
2041 	return r;
2042 }
2043 
2044 /* kfd_process_restore_queues - Restore all user queues of a process */
2045 int kfd_process_restore_queues(struct kfd_process *p)
2046 {
2047 	int r, ret = 0;
2048 	int i;
2049 
2050 	for (i = 0; i < p->n_pdds; i++) {
2051 		struct kfd_process_device *pdd = p->pdds[i];
2052 		struct device *dev = pdd->dev->adev->dev;
2053 
2054 		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
2055 
2056 		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
2057 							      &pdd->qpd);
2058 		if (r) {
2059 			dev_err(dev, "Failed to restore process queues\n");
2060 			if (!ret)
2061 				ret = r;
2062 		}
2063 	}
2064 
2065 	return ret;
2066 }
2067 
2068 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
2069 {
2070 	int i;
2071 
2072 	for (i = 0; i < p->n_pdds; i++)
2073 		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
2074 			return i;
2075 	return -EINVAL;
2076 }
2077 
2078 int
2079 kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
2080 			    uint32_t *gpuid, uint32_t *gpuidx)
2081 {
2082 	int i;
2083 
2084 	for (i = 0; i < p->n_pdds; i++)
2085 		if (p->pdds[i] && p->pdds[i]->dev == node) {
2086 			*gpuid = p->pdds[i]->user_gpu_id;
2087 			*gpuidx = i;
2088 			return 0;
2089 		}
2090 	return -EINVAL;
2091 }
2092 
static bool signal_eviction_fence(struct kfd_process *p)
{
	struct dma_fence *ef;
	int ret;

	rcu_read_lock();
	ef = dma_fence_get_rcu_safe(&p->ef);
	rcu_read_unlock();
	if (!ef)
		return false;

	ret = dma_fence_signal(ef);
	dma_fence_put(ef);

	return ret;
}
2109 
2110 static void evict_process_worker(struct work_struct *work)
2111 {
2112 	int ret;
2113 	struct kfd_process *p;
2114 	struct delayed_work *dwork;
2115 
2116 	dwork = to_delayed_work(work);
2117 
	/* Process termination cancels this delayed work, so the kfd_process
	 * p stays valid for the lifetime of this function.
	 */
2121 	p = container_of(dwork, struct kfd_process, eviction_work);
2122 
2123 	pr_debug("Started evicting process pid %d\n", p->lead_thread->pid);
2124 	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
2125 	if (!ret) {
		/* If another thread already signaled the eviction fence,
		 * that thread is responsible for stopping the queues and
		 * scheduling the restore work.
		 */
2130 		if (signal_eviction_fence(p) ||
2131 		    mod_delayed_work(kfd_restore_wq, &p->restore_work,
2132 				     msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
2133 			kfd_process_restore_queues(p);
2134 
2135 		pr_debug("Finished evicting process pid %d\n", p->lead_thread->pid);
	} else {
		pr_err("Failed to evict queues of process pid %d\n",
		       p->lead_thread->pid);
	}
2138 }
2139 
2140 static int restore_process_helper(struct kfd_process *p)
2141 {
2142 	int ret = 0;
2143 
2144 	/* VMs may not have been acquired yet during debugging. */
2145 	if (p->kgd_process_info) {
2146 		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
2147 			p->kgd_process_info, &p->ef);
2148 		if (ret)
2149 			return ret;
2150 	}
2151 
2152 	ret = kfd_process_restore_queues(p);
2153 	if (!ret)
2154 		pr_debug("Finished restoring process pid %d\n",
2155 			p->lead_thread->pid);
2156 	else
2157 		pr_err("Failed to restore queues of process pid %d\n",
2158 		      p->lead_thread->pid);
2159 
2160 	return ret;
2161 }
2162 
2163 static void restore_process_worker(struct work_struct *work)
2164 {
2165 	struct delayed_work *dwork;
2166 	struct kfd_process *p;
2167 	int ret = 0;
2168 
2169 	dwork = to_delayed_work(work);
2170 
	/* Process termination cancels this delayed work, so the kfd_process
	 * p stays valid for the lifetime of this function.
	 */
2174 	p = container_of(dwork, struct kfd_process, restore_work);
2175 	pr_debug("Started restoring process pasid %d\n", (int)p->lead_thread->pid);
2176 
	/* Set last_restore_timestamp before the restore actually succeeds.
	 * Otherwise it would have to be set by KGD (restore_process_bos)
	 * before the KFD BOs are unreserved; if it is not, the process can
	 * be evicted again before the timestamp is set.
	 * If the restore fails, the timestamp is set again on the next
	 * attempt, so the minimum GPU quantum becomes PROCESS_ACTIVE_TIME_MS
	 * minus the time spent in restore_process_helper().
	 */
2186 
2187 	p->last_restore_timestamp = get_jiffies_64();
2188 
2189 	ret = restore_process_helper(p);
2190 	if (ret) {
2191 		pr_debug("Failed to restore BOs of process pid %d, retry after %d ms\n",
2192 			 p->lead_thread->pid, PROCESS_BACK_OFF_TIME_MS);
		if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
				     msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)))
2195 			kfd_process_restore_queues(p);
2196 	}
2197 }
2198 
2199 void kfd_suspend_all_processes(void)
2200 {
2201 	struct kfd_process *p;
2202 	unsigned int temp;
2203 	int idx = srcu_read_lock(&kfd_processes_srcu);
2204 
2205 	WARN(debug_evictions, "Evicting all processes");
2206 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2207 		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
2208 			pr_err("Failed to suspend process pid %d\n", p->lead_thread->pid);
2209 		signal_eviction_fence(p);
2210 	}
2211 	srcu_read_unlock(&kfd_processes_srcu, idx);
2212 }
2213 
2214 int kfd_resume_all_processes(void)
2215 {
2216 	struct kfd_process *p;
2217 	unsigned int temp;
2218 	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
2219 
2220 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2221 		if (restore_process_helper(p)) {
2222 			pr_err("Restore process pid %d failed during resume\n",
2223 			      p->lead_thread->pid);
2224 			ret = -EFAULT;
2225 		}
2226 	}
2227 	srcu_read_unlock(&kfd_processes_srcu, idx);
2228 	return ret;
2229 }
2230 
2231 int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
2232 			  struct vm_area_struct *vma)
2233 {
2234 	struct kfd_process_device *pdd;
2235 	struct qcm_process_device *qpd;
2236 
2237 	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
2238 		dev_err(dev->adev->dev, "Incorrect CWSR mapping size.\n");
2239 		return -EINVAL;
2240 	}
2241 
2242 	pdd = kfd_get_process_device_data(dev, process);
2243 	if (!pdd)
2244 		return -EINVAL;
2245 	qpd = &pdd->qpd;
2246 
2247 	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2248 					get_order(KFD_CWSR_TBA_TMA_SIZE));
2249 	if (!qpd->cwsr_kaddr) {
2250 		dev_err(dev->adev->dev,
2251 			"Error allocating per process CWSR buffer.\n");
2252 		return -ENOMEM;
2253 	}
2254 
2255 	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
2256 		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
2257 	/* Mapping pages to user process */
2258 	return remap_pfn_range(vma, vma->vm_start,
2259 			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
2260 			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
2261 }
2262 
2263 /* assumes caller holds process lock. */
2264 int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
2265 {
2266 	uint32_t irq_drain_fence[8];
2267 	uint8_t node_id = 0;
2268 	int r = 0;
2269 
2270 	if (!KFD_IS_SOC15(pdd->dev))
2271 		return 0;
2272 
2273 	pdd->process->irq_drain_is_open = true;
2274 
2275 	memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
2276 	irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
2277 							KFD_IRQ_FENCE_CLIENTID;
2278 	irq_drain_fence[3] = pdd->pasid;
2279 
2280 	/*
2281 	 * For GFX 9.4.3/9.5.0, send the NodeId also in IH cookie DW[3]
2282 	 */
2283 	if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) ||
2284 	    KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4) ||
2285 	    KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 5, 0) ||
2286 	    KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(12, 1, 0)) {
2287 		node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
2288 		irq_drain_fence[3] |= node_id << 16;
2289 	}
2290 
	/* Send the drain fence; once it is processed, all stale interrupts
	 * previously scheduled to KFD for this process have been handled.
	 */
2292 	if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
2293 						     irq_drain_fence)) {
2294 		pdd->process->irq_drain_is_open = false;
2295 		return 0;
2296 	}
2297 
2298 	r = wait_event_interruptible(pdd->process->wait_irq_drain,
2299 				     !READ_ONCE(pdd->process->irq_drain_is_open));
2300 	if (r)
2301 		pdd->process->irq_drain_is_open = false;
2302 
2303 	return r;
2304 }
2305 
2306 void kfd_process_close_interrupt_drain(unsigned int pasid)
2307 {
2308 	struct kfd_process *p;
2309 
2310 	p = kfd_lookup_process_by_pasid(pasid, NULL);
2311 
2312 	if (!p)
2313 		return;
2314 
2315 	WRITE_ONCE(p->irq_drain_is_open, false);
2316 	wake_up_all(&p->wait_irq_drain);
2317 	kfd_unref_process(p);
2318 }
2319 
2320 struct send_exception_work_handler_workarea {
2321 	struct work_struct work;
2322 	struct kfd_process *p;
2323 	unsigned int queue_id;
2324 	uint64_t error_reason;
2325 };
2326 
2327 static void send_exception_work_handler(struct work_struct *work)
2328 {
2329 	struct send_exception_work_handler_workarea *workarea;
2330 	struct kfd_process *p;
2331 	struct queue *q;
2332 	struct mm_struct *mm;
2333 	struct kfd_context_save_area_header __user *csa_header;
2334 	uint64_t __user *err_payload_ptr;
2335 	uint64_t cur_err;
2336 	uint32_t ev_id;
2337 
2338 	workarea = container_of(work,
2339 				struct send_exception_work_handler_workarea,
2340 				work);
2341 	p = workarea->p;
2342 
2343 	mm = get_task_mm(p->lead_thread);
2344 
2345 	if (!mm)
2346 		return;
2347 
2348 	kthread_use_mm(mm);
2349 
2350 	q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
2351 
2352 	if (!q)
2353 		goto out;
2354 
2355 	csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
2356 
2357 	get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
2358 	get_user(cur_err, err_payload_ptr);
2359 	cur_err |= workarea->error_reason;
2360 	put_user(cur_err, err_payload_ptr);
2361 	get_user(ev_id, &csa_header->err_event_id);
2362 
2363 	kfd_set_event(p, ev_id);
2364 
2365 out:
2366 	kthread_unuse_mm(mm);
2367 	mmput(mm);
2368 }
2369 
2370 int kfd_send_exception_to_runtime(struct kfd_process *p,
2371 			unsigned int queue_id,
2372 			uint64_t error_reason)
2373 {
2374 	struct send_exception_work_handler_workarea worker;
2375 
2376 	INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
2377 
2378 	worker.p = p;
2379 	worker.queue_id = queue_id;
2380 	worker.error_reason = error_reason;
2381 
2382 	schedule_work(&worker.work);
2383 	flush_work(&worker.work);
2384 	destroy_work_on_stack(&worker.work);
2385 
2386 	return 0;
2387 }
2388 
2389 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
2390 {
2391 	int i;
2392 
2393 	if (gpu_id) {
2394 		for (i = 0; i < p->n_pdds; i++) {
2395 			struct kfd_process_device *pdd = p->pdds[i];
2396 
2397 			if (pdd->user_gpu_id == gpu_id)
2398 				return pdd;
2399 		}
2400 	}
2401 	return NULL;
2402 }
2403 
2404 int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
2405 {
2406 	int i;
2407 
2408 	if (!actual_gpu_id)
2409 		return 0;
2410 
2411 	for (i = 0; i < p->n_pdds; i++) {
2412 		struct kfd_process_device *pdd = p->pdds[i];
2413 
2414 		if (pdd->dev->id == actual_gpu_id)
2415 			return pdd->user_gpu_id;
2416 	}
2417 	return -EINVAL;
2418 }
2419 
2420 #if defined(CONFIG_DEBUG_FS)
2421 
2422 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
2423 {
2424 	struct kfd_process *p;
2425 	unsigned int temp;
2426 	int r = 0;
2427 
2428 	int idx = srcu_read_lock(&kfd_processes_srcu);
2429 
2430 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2431 		seq_printf(m, "Process %d PASID %d:\n",
2432 			   p->lead_thread->tgid, p->lead_thread->pid);
2433 
2434 		mutex_lock(&p->mutex);
2435 		r = pqm_debugfs_mqds(m, &p->pqm);
2436 		mutex_unlock(&p->mutex);
2437 
2438 		if (r)
2439 			break;
2440 	}
2441 
2442 	srcu_read_unlock(&kfd_processes_srcu, idx);
2443 
2444 	return r;
2445 }
2446 
2447 #endif
2448