// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"

/*
 * List of struct kfd_process (field kfd_processes).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

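/* Snapshot of one SDMA queue's read pointer, taken under the DQM lock and
 * consumed by kfd_sdma_activity_worker() below.
 */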
struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
	 *    with dqm_lock()/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without dqm_lock.
	 *    Save the SDMA count for each node and also add the count to the total
	 *    SDMA count counter.
	 *    It's possible that during this step a few SDMA queue nodes get deleted
	 *    from the qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
	 *    If any node got deleted, its SDMA count would be captured in the SDMA
	 *    past activity counter. So subtract the SDMA counter stored in step 2
	 *    for this node from the total SDMA count.
	 */
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			     (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
	 * count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}

/**
 * kfd_get_cu_occupancy - Collect the number of waves in flight on this device
 * by the current process. Translates the acquired wave count into the number
 * of compute units that are occupied.
 *
 * @attr: Handle of the attribute that allows reporting of wave count. The
 * attribute handle encapsulates the GPU device it is associated with, thereby
 * allowing collection of waves in flight, etc.
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_dev *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from the device if it supports it */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
			&max_waves_per_cu);

	/* Translate wave count to number of compute units */
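	/* Round up: e.g. 100 waves at 32 waves per CU occupy 4 CUs. */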
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}

static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
					kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

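		/* The work item lives on this stack frame, so it must
		 * complete (flush_work() below) before we return.
		 */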
		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter)/
				 SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}

static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}

static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
				struct kfd_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}

static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct kfd_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};
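/* Generates procfs_queue_groups from procfs_queue_attrs, for use as the
 * default_groups of procfs_queue_type below.
 */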
ATTRIBUTE_GROUPS(procfs_queue);

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_groups = procfs_queue_groups,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = kfd_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = kfd_sysfs_counters_show,
};

static struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = kfd_procfs_kobj_release,
};

int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
			proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				 char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}

static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
				"stats_%u", pdd->dev->id);
		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		ret = kobject_init_and_add(pdd->kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);

		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			kfd_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}

static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			"counters_%u", pdd->dev->id);
		kobj_counters = kfd_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;

		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}

static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
					    pdd->sdma_filename);
	}
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
			struct kfd_process_device *pdd, void *kptr)
{
	struct kfd_dev *dev = pdd->dev;

	if (kptr) {
		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
		kptr = NULL;
	}

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
					       NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *	This function should only be called right after the process
 *	is created, while kfd_processes_mutex is still being held,
 *	to avoid concurrency. Because of that exclusiveness, we do
 *	not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, struct kgd_mem **mem, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
						 pdd->drm_priv, mem, NULL,
						 flags, false);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
			pdd->drm_priv);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
				(struct kgd_mem *)*mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto sync_memory_failed;
		}
	}

	return err;

sync_memory_failed:
	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
					       NULL);
err_alloc_mem:
	*mem = NULL;
	*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IBs to AMDGPU from kernel space. If the memory is reserved
 *	successfully, ib_kaddr will hold the CPU/kernel
 *	address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &mem, &kaddr);
	if (ret)
		return ret;

	qpd->ib_mem = mem;
	qpd->ib_kaddr = kaddr;

	return 0;
}

static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!qpd->ib_kaddr || !qpd->ib_base)
		return;

	kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr);
}

struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the KFD processes mutex before starting process creation,
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread, false);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret)
			goto out_destroy;

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
				      "pasid");

		process->kobj_queues = kobject_create_and_add("queues",
							process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		kfd_procfs_add_sysfs_stats(process);
		kfd_procfs_add_sysfs_files(process);
		kfd_procfs_add_sysfs_counters(process);
	}
out:
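	/* Hand the caller a reference; it is dropped with kfd_unref_process(). */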
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;

out_destroy:
	hash_del_rcu(&process->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);
	/* kfd_process_free_notifier will trigger the cleanup */
	mmu_notifier_put(&process->mmu_notifier);
	return ERR_PTR(ret);
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread, false);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

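/* Look up a process by its mm pointer. Callers must hold the
 * kfd_processes_srcu read lock.
 */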
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	if (p && ref)
		kref_get(&p->ref);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
{
	struct task_struct *task = NULL;
	struct kfd_process *p    = NULL;

	if (!pid) {
		task = current;
		get_task_struct(task);
	} else {
		task = get_pid_task(pid, PIDTYPE_PID);
	}

	if (task) {
		p = find_process(task, true);
		put_task_struct(task);
	}

	return p;
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;
	int i;

	/*
	 * Remove all handles from the idr and release the appropriate
	 * local memory objects.
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {

		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *peer_pdd = p->pdds[i];

			if (!peer_pdd->drm_priv)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
						       pdd->drm_priv, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

/*
 * Just kunmap and unpin the signal BO here. It will be freed in
 * kfd_process_free_outstanding_kfd_bos().
 */
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	struct kfd_dev *kdev;
	void *mem;

	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
	if (!kdev)
		return;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(kdev, p);
	if (!pdd)
		goto out;

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(p->signal_handle));
	if (!mem)
		goto out;

	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);

out:
	mutex_unlock(&p->mutex);
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_device_free_bos(p->pdds[i]);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
				pdd->dev->id, p->pasid);

		kfd_process_device_destroy_cwsr_dgpu(pdd);
		kfd_process_device_destroy_ib_mem(pdd);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->adev, pdd->drm_priv);
			fput(pdd->drm_file);
		}

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		bitmap_free(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);

		if (pdd->dev->shared_resources.enable_mes)
			amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
						   pdd->proc_ctx_bo);
		/*
		 * Before destroying the pdd, make sure to report availability
		 * for auto suspend.
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
			pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
		p->pdds[i] = NULL;
	}
	p->n_pdds = 0;
}

static void kfd_process_remove_sysfs(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int i;

	if (!p->kobj)
		return;

	sysfs_remove_file(p->kobj, &p->attr_pasid);
	kobject_del(p->kobj_queues);
	kobject_put(p->kobj_queues);
	p->kobj_queues = NULL;

	for (i = 0; i < p->n_pdds; i++) {
		pdd = p->pdds[i];

		sysfs_remove_file(p->kobj, &pdd->attr_vram);
		sysfs_remove_file(p->kobj, &pdd->attr_sdma);

		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			sysfs_remove_file(pdd->kobj_stats,
					  &pdd->attr_cu_occupancy);
		kobject_del(pdd->kobj_stats);
		kobject_put(pdd->kobj_stats);
		pdd->kobj_stats = NULL;
	}

	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];

		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
		kobject_del(pdd->kobj_counters);
		kobject_put(pdd->kobj_counters);
		pdd->kobj_counters = NULL;
	}

	kobject_del(p->kobj);
	kobject_put(p->kobj);
	p->kobj = NULL;
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_process_remove_sysfs(p);
	kfd_iommu_unbind_process(p);

	kfd_process_kunmap_signal_bo(p);
	kfd_process_free_outstanding_kfd_bos(p);
	svm_range_list_fini(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

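	/* The teardown sleeps (mutexes, fences), while the final reference may
	 * be dropped from a context that cannot, so run it on a workqueue.
	 */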
	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
	int idx = srcu_read_lock(&kfd_processes_srcu);
	struct kfd_process *p = find_process_by_mm(mm);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier SRCU is read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
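	/* Wait for in-flight SRCU readers so no lookup can still return p. */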
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;
	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.alloc_notifier = kfd_process_alloc_notifier,
	.free_notifier = kfd_process_free_notifier,
};

static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long  offset;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;
		struct qcm_process_device *qpd = &p->pdds[i]->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_mem = mem;
	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
		return;

	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr);
}

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr)
{
	if (qpd->cwsr_kaddr) {
		/* KFD trap handler is bound, record as second-level TBA/TMA
		 * in the first-level TMA. The first-level trap will jump to
		 * the second level.
		 */
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		/* No trap handler bound, bind as first-level TBA/TMA. */
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}
}

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
	int i;

	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
	 * boot time retry setting. Mixing processes with different
	 * XNACK/retry settings can hang the GPU.
	 *
	 * Different GPUs can have different noretry settings depending
	 * on HW bugs or limitations. We need to find at least one
	 * XNACK mode for this process that's compatible with all GPUs.
	 * Fortunately GPUs with retry enabled (noretry=0) can run code
	 * built for XNACK-off. On GFXv9 it may perform more slowly.
	 *
	 * Therefore applications built for XNACK-off can always be
	 * supported and will be our fallback if any GPU does not
	 * support retry.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;

		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
		 * support the SVM APIs and don't need to be considered
		 * for the XNACK mode selection.
		 */
		if (!KFD_IS_SOC15(dev))
			continue;
		/* Aldebaran can always support XNACK because it can support
		 * per-process XNACK mode selection. But let the dev->noretry
		 * setting still influence the default XNACK mode.
		 */
		if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
			continue;

		/* GFXv10 and later GPUs do not support shader preemption
		 * during page faults. This can lead to poor QoS for queue
		 * management and memory-manager-related preemptions or
		 * even deadlocks.
		 */
		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			return false;

		if (dev->noretry)
			return false;
	}

	return true;
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	struct mmu_notifier *mn;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	process->queues_paused = false;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	err = kfd_event_init_process(process);
	if (err)
		goto err_event_init;
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0) {
		err = -ENOSPC;
		goto err_alloc_pasid;
	}

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* Init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Check XNACK support after PDDs are created in kfd_init_apertures */
	process->xnack_enabled = kfd_process_xnack_mode(process, false);

	err = svm_range_list_init(process);
	if (err)
		goto err_init_svm_range_list;

	/* alloc_notifier needs to find the process in the hash table */
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
	 * dropping the last process reference in the free_notifier.
	 */
	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
	if (IS_ERR(mn)) {
		err = PTR_ERR(mn);
		goto err_register_notifier;
	}
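	/* alloc_notifier found this process in the hash table, so
	 * mmu_notifier_get must hand back our own notifier.
	 */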
	BUG_ON(mn != &process->mmu_notifier);

	get_task_struct(process->lead_thread);

	return process;

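/* Error unwinding: undo the initialization steps in reverse order. */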
err_register_notifier:
	hash_del_rcu(&process->kfd_processes);
	svm_range_list_fini(process);
err_init_svm_range_list:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfd_event_free_process(process);
err_event_init:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

static int init_doorbell_bitmap(struct qcm_process_device *qpd,
			struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev))
		return 0;

	qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					     GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
			range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
			range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

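	/* Reserve each doorbell in the range, and also its mirrored
	 * counterpart at KFD_QUEUE_DOORBELL_MIRROR_OFFSET.
	 */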
	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			__set_bit(i, qpd->doorbell_bitmap);
			__set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				  qpd->doorbell_bitmap);
		}
	}

	return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i]->dev == dev)
			return p->pdds[i];

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;
	int retval = 0;

	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
		return NULL;
	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
		pr_err("Failed to alloc doorbell for pdd\n");
		goto err_free_pdd;
	}

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		goto err_free_pdd;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	pdd->user_gpu_id = dev->id;
	atomic64_set(&pdd->evict_duration_counter, 0);

	if (dev->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_PROC_CTX_SIZE,
						&pdd->proc_ctx_bo,
						&pdd->proc_ctx_gpu_addr,
						&pdd->proc_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate process context bo\n");
			goto err_free_pdd;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	p->pdds[p->n_pdds++] = pdd;

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;

err_free_pdd:
	kfree(pdd);
	return NULL;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Pointer to a DRM file descriptor (required)
 *
 * The VM is acquired from the DRM file descriptor. If successful,
 * the @pdd takes ownership of the file descriptor. Passing a NULL
 * @drm_file fails with -EINVAL.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (!drm_file)
		return -EINVAL;

	if (pdd->drm_priv)
		return -EBUSY;

	p = pdd->process;
	dev = pdd->dev;

	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
		dev->adev, drm_file, p->pasid,
		&p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}
	pdd->drm_priv = drm_file->private_data;
	atomic64_set(&pdd->tlb_seq, 0);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	pdd->drm_priv = NULL;

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!pdd->drm_priv)
		return ERR_PTR(-ENODEV);

	/*
	 * Signal the runtime-PM system to auto-resume and to prevent
	 * further runtime suspend once the device pdd is created, until
	 * the pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(dev->ddev->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(dev->ddev->dev);
			return ERR_PTR(err);
		}
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		goto out;

	/*
	 * Make sure that the runtime-PM usage counter is incremented just
	 * once per pdd.
	 */
	pdd->runtime_inuse = true;

	return pdd;

out:
	/* balance runpm reference count and exit with error */
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(dev->ddev->dev);
		pm_runtime_put_autosuspend(dev->ddev->dev);
	}

	return ERR_PTR(err);
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
{
	int r = 0;
	int i;
	unsigned int n_evicted = 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
					     trigger);

		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		/* Eviction returns -EIO if HWS is hung or the ASIC is
		 * resetting. In this case we would like to set all the
		 * queues to the evicted state, to prevent them from being
		 * added back since they are actually not saved right now.
		 */
		if (r && r != -EIO) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (n_evicted == 0)
			break;

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	int r, ret = 0;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
			return i;
	return -EINVAL;
}

int
kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
			   uint32_t *gpuid, uint32_t *gpuidx)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
			*gpuid = p->pdds[i]->user_gpu_id;
			*gpuidx = i;
			return 0;
		}
	return -EINVAL;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination cancels this work item. So during the
	 * lifetime of this work item, the kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between the restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves the KFD BOs, it is possible to be evicted again. But
	 * restore still has a few more steps to finish. So let's wait for
	 * any previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination cancels this work item. So during the
	 * lifetime of this work item, the kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before the restoration has actually
	 * succeeded. Otherwise it would have to be set by KGD
	 * (restore_process_bos) before the KFD BOs are unreserved. If not,
	 * the process can be evicted again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions).
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						     &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
	struct kfd_dev *dev = pdd->dev;

	/*
	 * It can be that we race and lose here, but that is extremely
	 * unlikely, and the worst that could happen is that we flush the
	 * changes into the TLB once more, which is harmless.
	 */
	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
		return;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
							pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
					pdd->process->pasid, type);
	}
}

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	if (gpu_id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *pdd = p->pdds[i];

			if (pdd->user_gpu_id == gpu_id)
				return pdd;
		}
	}
	return NULL;
}

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
{
	int i;

	if (!actual_gpu_id)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (pdd->dev->id == actual_gpu_id)
			return pdd->user_gpu_id;
	}
	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif