xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_process.c (revision fe7fad476ec8153a8b8767a08114e3e4a58a837e)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/mutex.h>
25 #include <linux/log2.h>
26 #include <linux/sched.h>
27 #include <linux/sched/mm.h>
28 #include <linux/sched/task.h>
29 #include <linux/mmu_context.h>
30 #include <linux/slab.h>
31 #include <linux/notifier.h>
32 #include <linux/compat.h>
33 #include <linux/mman.h>
34 #include <linux/file.h>
35 #include <linux/pm_runtime.h>
36 #include "amdgpu_amdkfd.h"
37 #include "amdgpu.h"
38 
39 struct mm_struct;
40 
41 #include "kfd_priv.h"
42 #include "kfd_device_queue_manager.h"
43 #include "kfd_svm.h"
44 #include "kfd_smi_events.h"
45 #include "kfd_debug.h"
46 
47 /*
48  * List of struct kfd_process (field kfd_process).
49  * Unique/indexed by mm_struct*
50  */
51 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
52 DEFINE_MUTEX(kfd_processes_mutex);
53 
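/* SRCU allows lockless read access to the process table; see find_process() */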
54 DEFINE_SRCU(kfd_processes_srcu);
55 
56 /* For process termination handling */
57 static struct workqueue_struct *kfd_process_wq;
58 
59 /* Ordered, single-threaded workqueue for restoring evicted
60  * processes. Restoring multiple processes concurrently under memory
61  * pressure can lead to processes blocking each other from validating
62  * their BOs and result in a live-lock situation where processes
63  * remain evicted indefinitely.
64  */
65 static struct workqueue_struct *kfd_restore_wq;
66 
67 static struct kfd_process *find_process(const struct task_struct *thread,
68 					bool ref);
69 static void kfd_process_ref_release(struct kref *ref);
70 static struct kfd_process *create_process(const struct task_struct *thread);
71 
72 static void evict_process_worker(struct work_struct *work);
73 static void restore_process_worker(struct work_struct *work);
74 
75 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
76 
77 struct kfd_procfs_tree {
78 	struct kobject *kobj;
79 };
80 
81 static struct kfd_procfs_tree procfs;
82 
83 /*
84  * Structure for SDMA activity tracking
85  */
86 struct kfd_sdma_activity_handler_workarea {
87 	struct work_struct sdma_activity_work;
88 	struct kfd_process_device *pdd;
89 	uint64_t sdma_activity_counter;
90 };
91 
92 struct temp_sdma_queue_list {
93 	uint64_t __user *rptr;
94 	uint64_t sdma_val;
95 	unsigned int queue_id;
96 	struct list_head list;
97 };
98 
99 static void kfd_sdma_activity_worker(struct work_struct *work)
100 {
101 	struct kfd_sdma_activity_handler_workarea *workarea;
102 	struct kfd_process_device *pdd;
103 	uint64_t val;
104 	struct mm_struct *mm;
105 	struct queue *q;
106 	struct qcm_process_device *qpd;
107 	struct device_queue_manager *dqm;
108 	int ret = 0;
109 	struct temp_sdma_queue_list sdma_q_list;
110 	struct temp_sdma_queue_list *sdma_q, *next;
111 
112 	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
113 				sdma_activity_work);
114 
115 	pdd = workarea->pdd;
116 	if (!pdd)
117 		return;
118 	dqm = pdd->dev->dqm;
119 	qpd = &pdd->qpd;
120 	if (!dqm || !qpd)
121 		return;
122 	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in the pdd.
125 	 * To get the current activity counters for all active SDMA queues,
126 	 * we loop over all SDMA queues and get their counts from user-space.
127 	 *
128 	 * We cannot call get_user() with dqm_lock held as it can cause
129 	 * a circular lock dependency situation. To read the SDMA stats,
130 	 * we need to do the following:
131 	 *
132 	 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
133 	 *    with dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without dqm_lock.
	 *    Save the SDMA count for each node and also add the count to the total
	 *    SDMA count.
	 *    It's possible that, during this step, a few SDMA queue nodes get
	 *    deleted from the qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
	 *    If a node got deleted, its SDMA count is already captured in the SDMA
	 *    past activity counter, so subtract the SDMA count saved in step 2
	 *    for that node from the total SDMA count.
143 	 */
144 	INIT_LIST_HEAD(&sdma_q_list.list);
145 
146 	/*
147 	 * Create the temp list of all SDMA queues
148 	 */
149 	dqm_lock(dqm);
150 
151 	list_for_each_entry(q, &qpd->queues_list, list) {
152 		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
153 		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
154 			continue;
155 
156 		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
157 		if (!sdma_q) {
158 			dqm_unlock(dqm);
159 			goto cleanup;
160 		}
161 
162 		INIT_LIST_HEAD(&sdma_q->list);
163 		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
164 		sdma_q->queue_id = q->properties.queue_id;
165 		list_add_tail(&sdma_q->list, &sdma_q_list.list);
166 	}
167 
168 	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
172 	 */
173 	if (list_empty(&sdma_q_list.list)) {
174 		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
175 		dqm_unlock(dqm);
176 		return;
177 	}
178 
179 	dqm_unlock(dqm);
180 
181 	/*
182 	 * Get the usage count for each SDMA queue in temp_list.
183 	 */
184 	mm = get_task_mm(pdd->process->lead_thread);
185 	if (!mm)
186 		goto cleanup;
187 
188 	kthread_use_mm(mm);
189 
190 	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
191 		val = 0;
192 		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
193 		if (ret) {
194 			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
195 				 sdma_q->queue_id);
196 		} else {
197 			sdma_q->sdma_val = val;
198 			workarea->sdma_activity_counter += val;
199 		}
200 	}
201 
202 	kthread_unuse_mm(mm);
203 	mmput(mm);
204 
205 	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
208 	 */
209 	dqm_lock(dqm);
210 
211 	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
212 
213 	list_for_each_entry(q, &qpd->queues_list, list) {
214 		if (list_empty(&sdma_q_list.list))
215 			break;
216 
217 		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
218 		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
219 			continue;
220 
221 		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
222 			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
223 			     (sdma_q->queue_id == q->properties.queue_id)) {
224 				list_del(&sdma_q->list);
225 				kfree(sdma_q);
226 				break;
227 			}
228 		}
229 	}
230 
231 	dqm_unlock(dqm);
232 
233 	/*
	 * If the temp list is not empty, it implies some queues got deleted
235 	 * from qpd->queues_list during SDMA usage read. Subtract the SDMA
236 	 * count for each node from the total SDMA count.
237 	 */
238 	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
239 		workarea->sdma_activity_counter -= sdma_q->sdma_val;
240 		list_del(&sdma_q->list);
241 		kfree(sdma_q);
242 	}
243 
244 	return;
245 
246 cleanup:
247 	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
248 		list_del(&sdma_q->list);
249 		kfree(sdma_q);
250 	}
251 }
252 
253 /**
 * kfd_get_cu_occupancy - Collect number of waves in-flight on this device
 * by the current process. Translates the acquired wave count into the number
 * of compute units that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count. The
 * attribute handle encapsulates the GPU device it is associated with,
 * thereby allowing collection of waves in flight, etc.
 * @buffer: Handle of user-provided buffer updated with wave count
262  *
263  * Return: Number of bytes written to user buffer or an error value
264  */
265 static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
266 {
267 	int cu_cnt;
268 	int wave_cnt;
269 	int max_waves_per_cu;
270 	struct kfd_node *dev = NULL;
271 	struct kfd_process *proc = NULL;
272 	struct kfd_process_device *pdd = NULL;
273 	int i;
274 	struct kfd_cu_occupancy *cu_occupancy;
275 	u32 queue_format;
276 
277 	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
278 	dev = pdd->dev;
279 	if (dev->kfd2kgd->get_cu_occupancy == NULL)
280 		return -EINVAL;
281 
282 	cu_cnt = 0;
283 	proc = pdd->process;
284 	if (pdd->qpd.queue_count == 0) {
285 		pr_debug("Gpu-Id: %d has no active queues for process pid %d\n",
286 			 dev->id, (int)proc->lead_thread->pid);
287 		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
288 	}
289 
	/* Collect wave count from the device if it supports it */
291 	wave_cnt = 0;
292 	max_waves_per_cu = 0;
293 
294 	cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL);
295 	if (!cu_occupancy)
296 		return -ENOMEM;
297 
298 	/*
299 	 * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition.
	 * For AQL queues, because of cooperative dispatch we multiply the wave
	 * count by the number of XCCs in the partition to get the total wave
	 * count across all XCCs in the partition.
	 * For PM4 queues, there is no cooperative dispatch, so wave_cnt stays
	 * as is.
304 	 */
305 	dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy,
306 			&max_waves_per_cu, ffs(dev->xcc_mask) - 1);
307 
308 	for (i = 0; i < AMDGPU_MAX_QUEUES; i++) {
309 		if (cu_occupancy[i].wave_cnt != 0 &&
310 		    kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd,
311 						cu_occupancy[i].doorbell_off,
312 						&queue_format)) {
313 			if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4))
314 				wave_cnt += cu_occupancy[i].wave_cnt;
315 			else
316 				wave_cnt += (NUM_XCC(dev->xcc_mask) *
317 						cu_occupancy[i].wave_cnt);
318 		}
319 	}
320 
321 	/* Translate wave count to number of compute units */
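	/* Round up: a CU with any waves in flight counts as occupied */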
322 	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
323 	kfree(cu_occupancy);
324 	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
325 }
326 
327 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
328 			       char *buffer)
329 {
330 	if (strcmp(attr->name, "pasid") == 0)
331 		return snprintf(buffer, PAGE_SIZE, "%d\n", 0);
332 	else if (strncmp(attr->name, "vram_", 5) == 0) {
333 		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
334 							      attr_vram);
335 		return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
336 	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
337 		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
338 							      attr_sdma);
339 		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
340 
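		/* The SDMA counters live in the target process's user address
		 * space. The worker (kfd_sdma_activity_worker) runs in a
		 * kthread that can temporarily adopt that mm with
		 * kthread_use_mm(), which this sysfs read context cannot. The
		 * on-stack work item is safe because it is flushed before
		 * this function returns.
		 */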
341 		INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
342 				  kfd_sdma_activity_worker);
343 
344 		sdma_activity_work_handler.pdd = pdd;
345 		sdma_activity_work_handler.sdma_activity_counter = 0;
346 
347 		schedule_work(&sdma_activity_work_handler.sdma_activity_work);
348 
349 		flush_work(&sdma_activity_work_handler.sdma_activity_work);
350 		destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
351 
352 		return snprintf(buffer, PAGE_SIZE, "%llu\n",
353 				(sdma_activity_work_handler.sdma_activity_counter)/
354 				 SDMA_ACTIVITY_DIVISOR);
355 	} else {
356 		pr_err("Invalid attribute");
357 		return -EINVAL;
358 	}
359 
360 	return 0;
361 }
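
/*
 * Example (a sketch; assumes the default sysfs location of the kfd device):
 *
 *   $ cat /sys/class/kfd/kfd/proc/<pid>/vram_<gpuid>
 *   $ cat /sys/class/kfd/kfd/proc/<pid>/sdma_<gpuid>
 */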
362 
363 static void kfd_procfs_kobj_release(struct kobject *kobj)
364 {
365 	kfree(kobj);
366 }
367 
368 static const struct sysfs_ops kfd_procfs_ops = {
369 	.show = kfd_procfs_show,
370 };
371 
372 static const struct kobj_type procfs_type = {
373 	.release = kfd_procfs_kobj_release,
374 	.sysfs_ops = &kfd_procfs_ops,
375 };
376 
377 void kfd_procfs_init(void)
378 {
379 	int ret = 0;
380 
381 	procfs.kobj = kfd_alloc_struct(procfs.kobj);
382 	if (!procfs.kobj)
383 		return;
384 
385 	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
386 				   &kfd_device->kobj, "proc");
387 	if (ret) {
388 		pr_warn("Could not create procfs proc folder");
389 		/* If we fail to create the procfs, clean up */
390 		kfd_procfs_shutdown();
391 	}
392 }
393 
394 void kfd_procfs_shutdown(void)
395 {
396 	if (procfs.kobj) {
397 		kobject_del(procfs.kobj);
398 		kobject_put(procfs.kobj);
399 		procfs.kobj = NULL;
400 	}
401 }
402 
403 static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
404 				     struct attribute *attr, char *buffer)
405 {
406 	struct queue *q = container_of(kobj, struct queue, kobj);
407 
408 	if (!strcmp(attr->name, "size"))
409 		return snprintf(buffer, PAGE_SIZE, "%llu",
410 				q->properties.queue_size);
411 	else if (!strcmp(attr->name, "type"))
412 		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
413 	else if (!strcmp(attr->name, "gpuid"))
414 		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
415 	else
416 		pr_err("Invalid attribute");
417 
418 	return 0;
419 }
420 
421 static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
422 				     struct attribute *attr, char *buffer)
423 {
424 	if (strcmp(attr->name, "evicted_ms") == 0) {
425 		struct kfd_process_device *pdd = container_of(attr,
426 				struct kfd_process_device,
427 				attr_evict);
428 		uint64_t evict_jiffies;
429 
430 		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
431 
432 		return snprintf(buffer,
433 				PAGE_SIZE,
434 				"%llu\n",
435 				jiffies64_to_msecs(evict_jiffies));
436 
437 	/* Sysfs handle that gets CU occupancy is per device */
438 	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
439 		return kfd_get_cu_occupancy(attr, buffer);
440 	} else {
441 		pr_err("Invalid attribute");
442 	}
443 
444 	return 0;
445 }
446 
447 static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
448 				       struct attribute *attr, char *buf)
449 {
450 	struct kfd_process_device *pdd;
451 
452 	if (!strcmp(attr->name, "faults")) {
453 		pdd = container_of(attr, struct kfd_process_device,
454 				   attr_faults);
455 		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
456 	}
457 	if (!strcmp(attr->name, "page_in")) {
458 		pdd = container_of(attr, struct kfd_process_device,
459 				   attr_page_in);
460 		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
461 	}
462 	if (!strcmp(attr->name, "page_out")) {
463 		pdd = container_of(attr, struct kfd_process_device,
464 				   attr_page_out);
465 		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
466 	}
467 	return 0;
468 }
469 
470 static struct attribute attr_queue_size = {
471 	.name = "size",
472 	.mode = KFD_SYSFS_FILE_MODE
473 };
474 
475 static struct attribute attr_queue_type = {
476 	.name = "type",
477 	.mode = KFD_SYSFS_FILE_MODE
478 };
479 
480 static struct attribute attr_queue_gpuid = {
481 	.name = "gpuid",
482 	.mode = KFD_SYSFS_FILE_MODE
483 };
484 
485 static struct attribute *procfs_queue_attrs[] = {
486 	&attr_queue_size,
487 	&attr_queue_type,
488 	&attr_queue_gpuid,
489 	NULL
490 };
491 ATTRIBUTE_GROUPS(procfs_queue);
492 
493 static const struct sysfs_ops procfs_queue_ops = {
494 	.show = kfd_procfs_queue_show,
495 };
496 
497 static const struct kobj_type procfs_queue_type = {
498 	.sysfs_ops = &procfs_queue_ops,
499 	.default_groups = procfs_queue_groups,
500 };
501 
502 static const struct sysfs_ops procfs_stats_ops = {
503 	.show = kfd_procfs_stats_show,
504 };
505 
506 static const struct kobj_type procfs_stats_type = {
507 	.sysfs_ops = &procfs_stats_ops,
508 	.release = kfd_procfs_kobj_release,
509 };
510 
511 static const struct sysfs_ops sysfs_counters_ops = {
512 	.show = kfd_sysfs_counters_show,
513 };
514 
515 static const struct kobj_type sysfs_counters_type = {
516 	.sysfs_ops = &sysfs_counters_ops,
517 	.release = kfd_procfs_kobj_release,
518 };
519 
520 int kfd_procfs_add_queue(struct queue *q)
521 {
522 	struct kfd_process *proc;
523 	int ret;
524 
525 	if (!q || !q->process)
526 		return -EINVAL;
527 	proc = q->process;
528 
529 	/* Create proc/<pid>/queues/<queue id> folder */
530 	if (!proc->kobj_queues)
531 		return -EFAULT;
532 	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
533 			proc->kobj_queues, "%u", q->properties.queue_id);
534 	if (ret < 0) {
535 		pr_warn("Creating proc/<pid>/queues/%u failed",
536 			q->properties.queue_id);
537 		kobject_put(&q->kobj);
538 		return ret;
539 	}
540 
541 	return 0;
542 }
543 
544 static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
545 				 char *name)
546 {
547 	int ret;
548 
549 	if (!kobj || !attr || !name)
550 		return;
551 
552 	attr->name = name;
553 	attr->mode = KFD_SYSFS_FILE_MODE;
554 	sysfs_attr_init(attr);
555 
556 	ret = sysfs_create_file(kobj, attr);
557 	if (ret)
558 		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
559 }
560 
561 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
562 {
563 	int ret;
564 	int i;
565 	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
566 
567 	if (!p || !p->kobj)
568 		return;
569 
570 	/*
571 	 * Create sysfs files for each GPU:
572 	 * - proc/<pid>/stats_<gpuid>/
573 	 * - proc/<pid>/stats_<gpuid>/evicted_ms
574 	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
575 	 */
576 	for (i = 0; i < p->n_pdds; i++) {
577 		struct kfd_process_device *pdd = p->pdds[i];
578 
579 		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
580 				"stats_%u", pdd->dev->id);
581 		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
582 		if (!pdd->kobj_stats)
583 			return;
584 
585 		ret = kobject_init_and_add(pdd->kobj_stats,
586 					   &procfs_stats_type,
587 					   p->kobj,
588 					   stats_dir_filename);
589 
590 		if (ret) {
591 			pr_warn("Creating KFD proc/stats_%s folder failed",
592 				stats_dir_filename);
593 			kobject_put(pdd->kobj_stats);
594 			pdd->kobj_stats = NULL;
595 			return;
596 		}
597 
598 		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
599 				      "evicted_ms");
600 		/* Add sysfs file to report compute unit occupancy */
601 		if (pdd->dev->kfd2kgd->get_cu_occupancy)
602 			kfd_sysfs_create_file(pdd->kobj_stats,
603 					      &pdd->attr_cu_occupancy,
604 					      "cu_occupancy");
605 	}
606 }
607 
608 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
609 {
610 	int ret = 0;
611 	int i;
612 	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
613 
614 	if (!p || !p->kobj)
615 		return;
616 
617 	/*
618 	 * Create sysfs files for each GPU which supports SVM
619 	 * - proc/<pid>/counters_<gpuid>/
620 	 * - proc/<pid>/counters_<gpuid>/faults
621 	 * - proc/<pid>/counters_<gpuid>/page_in
622 	 * - proc/<pid>/counters_<gpuid>/page_out
623 	 */
624 	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
625 		struct kfd_process_device *pdd = p->pdds[i];
626 		struct kobject *kobj_counters;
627 
628 		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
629 			"counters_%u", pdd->dev->id);
630 		kobj_counters = kfd_alloc_struct(kobj_counters);
631 		if (!kobj_counters)
632 			return;
633 
634 		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
635 					   p->kobj, counters_dir_filename);
636 		if (ret) {
637 			pr_warn("Creating KFD proc/%s folder failed",
638 				counters_dir_filename);
639 			kobject_put(kobj_counters);
640 			return;
641 		}
642 
643 		pdd->kobj_counters = kobj_counters;
644 		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
645 				      "faults");
646 		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
647 				      "page_in");
648 		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
649 				      "page_out");
650 	}
651 }
652 
653 static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
654 {
655 	int i;
656 
657 	if (!p || !p->kobj)
658 		return;
659 
660 	/*
661 	 * Create sysfs files for each GPU:
662 	 * - proc/<pid>/vram_<gpuid>
663 	 * - proc/<pid>/sdma_<gpuid>
664 	 */
665 	for (i = 0; i < p->n_pdds; i++) {
666 		struct kfd_process_device *pdd = p->pdds[i];
667 
668 		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
669 			 pdd->dev->id);
670 		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
671 				      pdd->vram_filename);
672 
673 		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
674 			 pdd->dev->id);
675 		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
676 					    pdd->sdma_filename);
677 	}
678 }
679 
680 void kfd_procfs_del_queue(struct queue *q)
681 {
682 	if (!q)
683 		return;
684 
685 	kobject_del(&q->kobj);
686 	kobject_put(&q->kobj);
687 }
688 
689 int kfd_process_create_wq(void)
690 {
691 	if (!kfd_process_wq)
692 		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
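	/* Freezable so that restore work does not run while the system is
	 * suspending.
	 */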
693 	if (!kfd_restore_wq)
694 		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq",
695 							 WQ_FREEZABLE);
696 
697 	if (!kfd_process_wq || !kfd_restore_wq) {
698 		kfd_process_destroy_wq();
699 		return -ENOMEM;
700 	}
701 
702 	return 0;
703 }
704 
705 void kfd_process_destroy_wq(void)
706 {
707 	if (kfd_process_wq) {
708 		destroy_workqueue(kfd_process_wq);
709 		kfd_process_wq = NULL;
710 	}
711 	if (kfd_restore_wq) {
712 		destroy_workqueue(kfd_restore_wq);
713 		kfd_restore_wq = NULL;
714 	}
715 }
716 
717 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
718 			struct kfd_process_device *pdd, void **kptr)
719 {
720 	struct kfd_node *dev = pdd->dev;
721 
722 	if (kptr && *kptr) {
723 		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
724 		*kptr = NULL;
725 	}
726 
727 	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
728 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
729 					       NULL);
730 }
731 
/* kfd_process_alloc_gpuvm - Allocate GPU VM memory for the KFD process
 *	This function should only be called right after the process
734  *	is created and when kfd_processes_mutex is still being held
735  *	to avoid concurrency. Because of that exclusiveness, we do
736  *	not need to take p->mutex.
737  */
738 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
739 				   uint64_t gpu_va, uint32_t size,
740 				   uint32_t flags, struct kgd_mem **mem, void **kptr)
741 {
742 	struct kfd_node *kdev = pdd->dev;
743 	int err;
744 
745 	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
746 						 pdd->drm_priv, mem, NULL,
747 						 flags, false);
748 	if (err)
749 		goto err_alloc_mem;
750 
751 	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
752 			pdd->drm_priv);
753 	if (err)
754 		goto err_map_mem;
755 
756 	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
757 	if (err) {
758 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
759 		goto sync_memory_failed;
760 	}
761 
762 	if (kptr) {
763 		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
764 				(struct kgd_mem *)*mem, kptr, NULL);
765 		if (err) {
766 			pr_debug("Map GTT BO to kernel failed\n");
767 			goto sync_memory_failed;
768 		}
769 	}
770 
771 	return err;
772 
773 sync_memory_failed:
774 	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
775 
776 err_map_mem:
777 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
778 					       NULL);
779 err_alloc_mem:
780 	*mem = NULL;
781 	*kptr = NULL;
782 	return err;
783 }
784 
785 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IBs to AMDGPU from the kernel. If the memory is reserved
 *	successfully, ib_kaddr will hold the CPU/kernel
 *	address. Check ib_kaddr before accessing the memory.
790  */
791 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
792 {
793 	struct qcm_process_device *qpd = &pdd->qpd;
794 	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
795 			KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
796 			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
797 			KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
798 	struct kgd_mem *mem;
799 	void *kaddr;
800 	int ret;
801 
802 	if (qpd->ib_kaddr || !qpd->ib_base)
803 		return 0;
804 
805 	/* ib_base is only set for dGPU */
806 	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
807 				      &mem, &kaddr);
808 	if (ret)
809 		return ret;
810 
811 	qpd->ib_mem = mem;
812 	qpd->ib_kaddr = kaddr;
813 
814 	return 0;
815 }
816 
817 static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
818 {
819 	struct qcm_process_device *qpd = &pdd->qpd;
820 
821 	if (!qpd->ib_kaddr || !qpd->ib_base)
822 		return;
823 
824 	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
825 }
826 
827 struct kfd_process *kfd_create_process(struct task_struct *thread)
828 {
829 	struct kfd_process *process;
830 	int ret;
831 
832 	if (!(thread->mm && mmget_not_zero(thread->mm)))
833 		return ERR_PTR(-EINVAL);
834 
835 	/* Only the pthreads threading model is supported. */
836 	if (thread->group_leader->mm != thread->mm) {
837 		mmput(thread->mm);
838 		return ERR_PTR(-EINVAL);
839 	}
840 
841 	/*
	 * Take the kfd_processes_mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
845 	 */
846 	mutex_lock(&kfd_processes_mutex);
847 
848 	if (kfd_is_locked()) {
849 		pr_debug("KFD is locked! Cannot create process");
850 		process = ERR_PTR(-EINVAL);
851 		goto out;
852 	}
853 
854 	/* A prior open of /dev/kfd could have already created the process.
	 * find_process will increase the process kref in this case.
856 	 */
857 	process = find_process(thread, true);
858 	if (process) {
859 		pr_debug("Process already found\n");
860 	} else {
861 		/* If the process just called exec(3), it is possible that the
862 		 * cleanup of the kfd_process (following the release of the mm
863 		 * of the old process image) is still in the cleanup work queue.
		 * Make sure to drain any pending jobs before trying to
		 * recreate any resource for this process.
866 		 */
867 		flush_workqueue(kfd_process_wq);
868 
869 		process = create_process(thread);
870 		if (IS_ERR(process))
871 			goto out;
872 
873 		if (!procfs.kobj)
874 			goto out;
875 
876 		process->kobj = kfd_alloc_struct(process->kobj);
877 		if (!process->kobj) {
878 			pr_warn("Creating procfs kobject failed");
879 			goto out;
880 		}
881 		ret = kobject_init_and_add(process->kobj, &procfs_type,
882 					   procfs.kobj, "%d",
883 					   (int)process->lead_thread->pid);
884 		if (ret) {
885 			pr_warn("Creating procfs pid directory failed");
886 			kobject_put(process->kobj);
887 			goto out;
888 		}
889 
890 		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
891 				      "pasid");
892 
893 		process->kobj_queues = kobject_create_and_add("queues",
894 							process->kobj);
895 		if (!process->kobj_queues)
896 			pr_warn("Creating KFD proc/queues folder failed");
897 
898 		kfd_procfs_add_sysfs_stats(process);
899 		kfd_procfs_add_sysfs_files(process);
900 		kfd_procfs_add_sysfs_counters(process);
901 
902 		init_waitqueue_head(&process->wait_irq_drain);
903 	}
904 out:
905 	mutex_unlock(&kfd_processes_mutex);
906 	mmput(thread->mm);
907 
908 	return process;
909 }
910 
911 struct kfd_process *kfd_get_process(const struct task_struct *thread)
912 {
913 	struct kfd_process *process;
914 
915 	if (!thread->mm)
916 		return ERR_PTR(-EINVAL);
917 
918 	/* Only the pthreads threading model is supported. */
919 	if (thread->group_leader->mm != thread->mm)
920 		return ERR_PTR(-EINVAL);
921 
922 	process = find_process(thread, false);
923 	if (!process)
924 		return ERR_PTR(-EINVAL);
925 
926 	return process;
927 }
928 
929 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
930 {
931 	struct kfd_process *process;
932 
933 	hash_for_each_possible_rcu(kfd_processes_table, process,
934 					kfd_processes, (uintptr_t)mm)
935 		if (process->mm == mm)
936 			return process;
937 
938 	return NULL;
939 }
940 
941 static struct kfd_process *find_process(const struct task_struct *thread,
942 					bool ref)
943 {
944 	struct kfd_process *p;
945 	int idx;
946 
947 	idx = srcu_read_lock(&kfd_processes_srcu);
948 	p = find_process_by_mm(thread->mm);
949 	if (p && ref)
950 		kref_get(&p->ref);
951 	srcu_read_unlock(&kfd_processes_srcu, idx);
952 
953 	return p;
954 }
955 
956 void kfd_unref_process(struct kfd_process *p)
957 {
958 	kref_put(&p->ref, kfd_process_ref_release);
959 }
960 
961 /* This increments the process->ref counter. */
962 struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
963 {
964 	struct task_struct *task = NULL;
965 	struct kfd_process *p    = NULL;
966 
967 	if (!pid) {
968 		task = current;
969 		get_task_struct(task);
970 	} else {
971 		task = get_pid_task(pid, PIDTYPE_PID);
972 	}
973 
974 	if (task) {
975 		p = find_process(task, true);
976 		put_task_struct(task);
977 	}
978 
979 	return p;
980 }
981 
982 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
983 {
984 	struct kfd_process *p = pdd->process;
985 	void *mem;
986 	int id;
987 	int i;
988 
989 	/*
	 * Remove all handles from the idr and release the appropriate
	 * local memory objects.
992 	 */
993 	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
994 
995 		for (i = 0; i < p->n_pdds; i++) {
996 			struct kfd_process_device *peer_pdd = p->pdds[i];
997 
998 			if (!peer_pdd->drm_priv)
999 				continue;
1000 			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1001 				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
1002 		}
1003 
1004 		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
1005 						       pdd->drm_priv, NULL);
1006 		kfd_process_device_remove_obj_handle(pdd, id);
1007 	}
1008 }
1009 
1010 /*
 * Just kunmap and unpin the signal BO here. It will be freed in
 * kfd_process_free_outstanding_kfd_bos().
1013  */
1014 static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
1015 {
1016 	struct kfd_process_device *pdd;
1017 	struct kfd_node *kdev;
1018 	void *mem;
1019 
1020 	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
1021 	if (!kdev)
1022 		return;
1023 
1024 	mutex_lock(&p->mutex);
1025 
1026 	pdd = kfd_get_process_device_data(kdev, p);
1027 	if (!pdd)
1028 		goto out;
1029 
1030 	mem = kfd_process_device_translate_handle(
1031 		pdd, GET_IDR_HANDLE(p->signal_handle));
1032 	if (!mem)
1033 		goto out;
1034 
1035 	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
1036 
1037 out:
1038 	mutex_unlock(&p->mutex);
1039 }
1040 
1041 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
1042 {
1043 	int i;
1044 
1045 	for (i = 0; i < p->n_pdds; i++)
1046 		kfd_process_device_free_bos(p->pdds[i]);
1047 }
1048 
1049 static void kfd_process_destroy_pdds(struct kfd_process *p)
1050 {
1051 	int i;
1052 
1053 	for (i = 0; i < p->n_pdds; i++) {
1054 		struct kfd_process_device *pdd = p->pdds[i];
1055 
1056 		pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
1057 			pdd->dev->id, p->lead_thread->pid);
1058 		kfd_process_device_destroy_cwsr_dgpu(pdd);
1059 		kfd_process_device_destroy_ib_mem(pdd);
1060 
1061 		if (pdd->drm_file)
1062 			fput(pdd->drm_file);
1063 
1064 		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
1065 			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
1066 				get_order(KFD_CWSR_TBA_TMA_SIZE));
1067 
1068 		idr_destroy(&pdd->alloc_idr);
1069 
1070 		kfd_free_process_doorbells(pdd->dev->kfd, pdd);
1071 
1072 		if (pdd->dev->kfd->shared_resources.enable_mes &&
1073 			pdd->proc_ctx_cpu_ptr)
1074 			amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
1075 						   &pdd->proc_ctx_bo);
1076 		/*
		 * Before destroying the pdd, make sure to report availability
		 * for autosuspend.
1079 		 */
1080 		if (pdd->runtime_inuse) {
1081 			pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
1082 			pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
1083 			pdd->runtime_inuse = false;
1084 		}
1085 
1086 		kfree(pdd);
1087 		p->pdds[i] = NULL;
1088 	}
1089 	p->n_pdds = 0;
1090 }
1091 
1092 static void kfd_process_remove_sysfs(struct kfd_process *p)
1093 {
1094 	struct kfd_process_device *pdd;
1095 	int i;
1096 
1097 	if (!p->kobj)
1098 		return;
1099 
1100 	sysfs_remove_file(p->kobj, &p->attr_pasid);
1101 	kobject_del(p->kobj_queues);
1102 	kobject_put(p->kobj_queues);
1103 	p->kobj_queues = NULL;
1104 
1105 	for (i = 0; i < p->n_pdds; i++) {
1106 		pdd = p->pdds[i];
1107 
1108 		sysfs_remove_file(p->kobj, &pdd->attr_vram);
1109 		sysfs_remove_file(p->kobj, &pdd->attr_sdma);
1110 
1111 		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
1112 		if (pdd->dev->kfd2kgd->get_cu_occupancy)
1113 			sysfs_remove_file(pdd->kobj_stats,
1114 					  &pdd->attr_cu_occupancy);
1115 		kobject_del(pdd->kobj_stats);
1116 		kobject_put(pdd->kobj_stats);
1117 		pdd->kobj_stats = NULL;
1118 	}
1119 
1120 	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
1121 		pdd = p->pdds[i];
1122 
1123 		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
1124 		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
1125 		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
1126 		kobject_del(pdd->kobj_counters);
1127 		kobject_put(pdd->kobj_counters);
1128 		pdd->kobj_counters = NULL;
1129 	}
1130 
1131 	kobject_del(p->kobj);
1132 	kobject_put(p->kobj);
1133 	p->kobj = NULL;
1134 }
1135 
1136 /* No process locking is needed in this function, because the process
1137  * is not findable any more. We must assume that no other thread is
1138  * using it any more, otherwise we couldn't safely free the process
1139  * structure in the end.
1140  */
1141 static void kfd_process_wq_release(struct work_struct *work)
1142 {
1143 	struct kfd_process *p = container_of(work, struct kfd_process,
1144 					     release_work);
1145 	struct dma_fence *ef;
1146 
1147 	kfd_process_dequeue_from_all_devices(p);
1148 	pqm_uninit(&p->pqm);
1149 
1150 	/* Signal the eviction fence after user mode queues are
1151 	 * destroyed. This allows any BOs to be freed without
1152 	 * triggering pointless evictions or waiting for fences.
1153 	 */
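	/* Wait out any concurrent RCU readers of p->ef (see signal_eviction_fence()) */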
1154 	synchronize_rcu();
1155 	ef = rcu_access_pointer(p->ef);
1156 	if (ef)
1157 		dma_fence_signal(ef);
1158 
1159 	kfd_process_remove_sysfs(p);
1160 
1161 	kfd_process_kunmap_signal_bo(p);
1162 	kfd_process_free_outstanding_kfd_bos(p);
1163 	svm_range_list_fini(p);
1164 
1165 	kfd_process_destroy_pdds(p);
1166 	dma_fence_put(ef);
1167 
1168 	kfd_event_free_process(p);
1169 
1170 	mutex_destroy(&p->mutex);
1171 
1172 	put_task_struct(p->lead_thread);
1173 
1174 	kfree(p);
1175 }
1176 
1177 static void kfd_process_ref_release(struct kref *ref)
1178 {
1179 	struct kfd_process *p = container_of(ref, struct kfd_process, ref);
1180 
1181 	INIT_WORK(&p->release_work, kfd_process_wq_release);
1182 	queue_work(kfd_process_wq, &p->release_work);
1183 }
1184 
1185 static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
1186 {
	/* This increments the p->ref counter if the kfd_process p exists */
1188 	struct kfd_process *p = kfd_lookup_process_by_mm(mm);
1189 
1190 	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
1191 }
1192 
1193 static void kfd_process_free_notifier(struct mmu_notifier *mn)
1194 {
1195 	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
1196 }
1197 
1198 static void kfd_process_notifier_release_internal(struct kfd_process *p)
1199 {
1200 	int i;
1201 
1202 	cancel_delayed_work_sync(&p->eviction_work);
1203 	cancel_delayed_work_sync(&p->restore_work);
1204 
1205 	for (i = 0; i < p->n_pdds; i++) {
1206 		struct kfd_process_device *pdd = p->pdds[i];
1207 
		/* Re-enable GFX OFF since runtime enable with TTMP setup disabled it. */
1209 		if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
1210 			amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
1211 	}
1212 
1213 	/* Indicate to other users that MM is no longer valid */
1214 	p->mm = NULL;
1215 	kfd_dbg_trap_disable(p);
1216 
1217 	if (atomic_read(&p->debugged_process_count) > 0) {
1218 		struct kfd_process *target;
1219 		unsigned int temp;
1220 		int idx = srcu_read_lock(&kfd_processes_srcu);
1221 
1222 		hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
1223 			if (target->debugger_process && target->debugger_process == p) {
1224 				mutex_lock_nested(&target->mutex, 1);
1225 				kfd_dbg_trap_disable(target);
1226 				mutex_unlock(&target->mutex);
1227 				if (atomic_read(&p->debugged_process_count) == 0)
1228 					break;
1229 			}
1230 		}
1231 
1232 		srcu_read_unlock(&kfd_processes_srcu, idx);
1233 	}
1234 
1235 	mmu_notifier_put(&p->mmu_notifier);
1236 }
1237 
1238 static void kfd_process_notifier_release(struct mmu_notifier *mn,
1239 					struct mm_struct *mm)
1240 {
1241 	struct kfd_process *p;
1242 
1243 	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier SRCU is read-locked.
1246 	 */
1247 	p = container_of(mn, struct kfd_process, mmu_notifier);
1248 	if (WARN_ON(p->mm != mm))
1249 		return;
1250 
1251 	mutex_lock(&kfd_processes_mutex);
1252 	/*
	 * Return early if the table is empty.
	 *
	 * This could potentially happen if this function is called concurrently
	 * by the mmu_notifier and by kfd_cleanup_processes().
1258 	 */
1259 	if (hash_empty(kfd_processes_table)) {
1260 		mutex_unlock(&kfd_processes_mutex);
1261 		return;
1262 	}
1263 	hash_del_rcu(&p->kfd_processes);
1264 	mutex_unlock(&kfd_processes_mutex);
1265 	synchronize_srcu(&kfd_processes_srcu);
1266 
1267 	kfd_process_notifier_release_internal(p);
1268 }
1269 
1270 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
1271 	.release = kfd_process_notifier_release,
1272 	.alloc_notifier = kfd_process_alloc_notifier,
1273 	.free_notifier = kfd_process_free_notifier,
1274 };
1275 
1276 /*
 * This code handles the case when the driver is being unloaded before all
 * mm_structs are released. We need to safely free the kfd_process and
 * avoid race conditions with the mmu_notifier that might try to free them.
1281  */
1282 void kfd_cleanup_processes(void)
1283 {
1284 	struct kfd_process *p;
1285 	struct hlist_node *p_temp;
1286 	unsigned int temp;
1287 	HLIST_HEAD(cleanup_list);
1288 
	 * Move all remaining kfd_processes from the process table to a
	 * temp list for processing. Once done, the mmu_notifier release
	 * callback will not see the kfd_process in the table and will
	 * return early, avoiding double-free issues.
1294 	 */
1295 	mutex_lock(&kfd_processes_mutex);
1296 	hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
1297 		hash_del_rcu(&p->kfd_processes);
1298 		synchronize_srcu(&kfd_processes_srcu);
1299 		hlist_add_head(&p->kfd_processes, &cleanup_list);
1300 	}
1301 	mutex_unlock(&kfd_processes_mutex);
1302 
1303 	hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
1304 		kfd_process_notifier_release_internal(p);
1305 
1306 	/*
	 * Ensure that all outstanding free_notifier callbacks get called,
	 * triggering the release of the kfd_process struct.
1309 	 */
1310 	mmu_notifier_synchronize();
1311 }
1312 
1313 int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
1314 {
1315 	unsigned long  offset;
1316 	int i;
1317 
1318 	if (p->has_cwsr)
1319 		return 0;
1320 
1321 	for (i = 0; i < p->n_pdds; i++) {
1322 		struct kfd_node *dev = p->pdds[i]->dev;
1323 		struct qcm_process_device *qpd = &p->pdds[i]->qpd;
1324 
1325 		if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
1326 			continue;
1327 
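		/* The encoded mmap offset directs the driver's mmap handler to
		 * back this range with the reserved CWSR memory of this GPU.
		 */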
1328 		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
1329 		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
1330 			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
1331 			MAP_SHARED, offset);
1332 
1333 		if (IS_ERR_VALUE(qpd->tba_addr)) {
1334 			int err = qpd->tba_addr;
1335 
1336 			dev_err(dev->adev->dev,
1337 				"Failure to set tba address. error %d.\n", err);
1338 			qpd->tba_addr = 0;
1339 			qpd->cwsr_kaddr = NULL;
1340 			return err;
1341 		}
1342 
1343 		memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1344 
1345 		kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
1346 
1347 		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm.\n",
1349 			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1350 	}
1351 
1352 	p->has_cwsr = true;
1353 
1354 	return 0;
1355 }
1356 
1357 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
1358 {
1359 	struct kfd_node *dev = pdd->dev;
1360 	struct qcm_process_device *qpd = &pdd->qpd;
1361 	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
1362 			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
1363 			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1364 	struct kgd_mem *mem;
1365 	void *kaddr;
1366 	int ret;
1367 
1368 	if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
1369 		return 0;
1370 
1371 	/* cwsr_base is only set for dGPU */
1372 	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
1373 				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
1374 	if (ret)
1375 		return ret;
1376 
1377 	qpd->cwsr_mem = mem;
1378 	qpd->cwsr_kaddr = kaddr;
1379 	qpd->tba_addr = qpd->cwsr_base;
1380 
1381 	memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1382 
1383 	kfd_process_set_trap_debug_flag(&pdd->qpd,
1384 					pdd->process->debug_trap_enabled);
1385 
1386 	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm.\n",
1388 		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1389 
1390 	return 0;
1391 }
1392 
1393 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
1394 {
1395 	struct kfd_node *dev = pdd->dev;
1396 	struct qcm_process_device *qpd = &pdd->qpd;
1397 
1398 	if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
1399 		return;
1400 
1401 	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
1402 }
1403 
1404 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
1405 				  uint64_t tba_addr,
1406 				  uint64_t tma_addr)
1407 {
1408 	if (qpd->cwsr_kaddr) {
1409 		/* KFD trap handler is bound, record as second-level TBA/TMA
1410 		 * in first-level TMA. First-level trap will jump to second.
1411 		 */
1412 		uint64_t *tma =
1413 			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1414 		tma[0] = tba_addr;
1415 		tma[1] = tma_addr;
1416 	} else {
1417 		/* No trap handler bound, bind as first-level TBA/TMA. */
1418 		qpd->tba_addr = tba_addr;
1419 		qpd->tma_addr = tma_addr;
1420 	}
1421 }
1422 
1423 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
1424 {
1425 	int i;
1426 
1427 	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
1428 	 * boot time retry setting. Mixing processes with different
1429 	 * XNACK/retry settings can hang the GPU.
1430 	 *
1431 	 * Different GPUs can have different noretry settings depending
1432 	 * on HW bugs or limitations. We need to find at least one
1433 	 * XNACK mode for this process that's compatible with all GPUs.
1434 	 * Fortunately GPUs with retry enabled (noretry=0) can run code
1435 	 * built for XNACK-off. On GFXv9 it may perform slower.
1436 	 *
1437 	 * Therefore applications built for XNACK-off can always be
1438 	 * supported and will be our fallback if any GPU does not
1439 	 * support retry.
1440 	 */
1441 	for (i = 0; i < p->n_pdds; i++) {
1442 		struct kfd_node *dev = p->pdds[i]->dev;
1443 
1444 		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
1445 		 * support the SVM APIs and don't need to be considered
1446 		 * for the XNACK mode selection.
1447 		 */
1448 		if (!KFD_IS_SOC15(dev))
1449 			continue;
1450 		/* Aldebaran can always support XNACK because it can support
1451 		 * per-process XNACK mode selection. But let the dev->noretry
1452 		 * setting still influence the default XNACK mode.
1453 		 */
1454 		if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) {
1455 			if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {
1456 				pr_debug("SRIOV platform xnack not supported\n");
1457 				return false;
1458 			}
1459 			continue;
1460 		}
1461 
1462 		/* GFXv10 and later GPUs do not support shader preemption
1463 		 * during page faults. This can lead to poor QoS for queue
1464 		 * management and memory-manager-related preemptions or
1465 		 * even deadlocks.
1466 		 */
1467 		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
1468 			return false;
1469 
1470 		if (dev->kfd->noretry)
1471 			return false;
1472 	}
1473 
1474 	return true;
1475 }
1476 
1477 void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
1478 				     bool enabled)
1479 {
1480 	if (qpd->cwsr_kaddr) {
1481 		uint64_t *tma =
1482 			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
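		/* tma[0]/tma[1] hold a second-level TBA/TMA (see
		 * kfd_process_set_trap_handler); slot 2 carries the
		 * debug-trap-enabled flag.
		 */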
1483 		tma[2] = enabled;
1484 	}
1485 }
1486 
1487 /*
 * On return, the kfd_process is fully operational and will be freed when
 * the mm is released.
1490  */
1491 static struct kfd_process *create_process(const struct task_struct *thread)
1492 {
1493 	struct kfd_process *process;
1494 	struct mmu_notifier *mn;
1495 	int err = -ENOMEM;
1496 
1497 	process = kzalloc(sizeof(*process), GFP_KERNEL);
1498 	if (!process)
1499 		goto err_alloc_process;
1500 
1501 	kref_init(&process->ref);
1502 	mutex_init(&process->mutex);
1503 	process->mm = thread->mm;
1504 	process->lead_thread = thread->group_leader;
1505 	process->n_pdds = 0;
1506 	process->queues_paused = false;
1507 	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
1508 	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
1509 	process->last_restore_timestamp = get_jiffies_64();
1510 	err = kfd_event_init_process(process);
1511 	if (err)
1512 		goto err_event_init;
1513 	process->is_32bit_user_mode = in_compat_syscall();
1514 	process->debug_trap_enabled = false;
1515 	process->debugger_process = NULL;
1516 	process->exception_enable_mask = 0;
1517 	atomic_set(&process->debugged_process_count, 0);
1518 	sema_init(&process->runtime_enable_sema, 0);
1519 
1520 	err = pqm_init(&process->pqm, process);
1521 	if (err != 0)
1522 		goto err_process_pqm_init;
1523 
	/* Init process apertures */
1525 	err = kfd_init_apertures(process);
1526 	if (err != 0)
1527 		goto err_init_apertures;
1528 
1529 	/* Check XNACK support after PDDs are created in kfd_init_apertures */
1530 	process->xnack_enabled = kfd_process_xnack_mode(process, false);
1531 
1532 	err = svm_range_list_init(process);
1533 	if (err)
1534 		goto err_init_svm_range_list;
1535 
1536 	/* alloc_notifier needs to find the process in the hash table */
1537 	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
1538 			(uintptr_t)process->mm);
1539 
	/* Take a reference so that free_notifier does not start
	 * kfd_process_wq_release if mmu_notifier_get fails because of a
	 * pending signal.
1542 	 */
1543 	kref_get(&process->ref);
1544 
1545 	/* MMU notifier registration must be the last call that can fail
1546 	 * because after this point we cannot unwind the process creation.
1547 	 * After this point, mmu_notifier_put will trigger the cleanup by
1548 	 * dropping the last process reference in the free_notifier.
1549 	 */
1550 	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
1551 	if (IS_ERR(mn)) {
1552 		err = PTR_ERR(mn);
1553 		goto err_register_notifier;
1554 	}
1555 	BUG_ON(mn != &process->mmu_notifier);
1556 
1557 	kfd_unref_process(process);
1558 	get_task_struct(process->lead_thread);
1559 
1560 	INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
1561 
1562 	return process;
1563 
1564 err_register_notifier:
1565 	hash_del_rcu(&process->kfd_processes);
1566 	svm_range_list_fini(process);
1567 err_init_svm_range_list:
1568 	kfd_process_free_outstanding_kfd_bos(process);
1569 	kfd_process_destroy_pdds(process);
1570 err_init_apertures:
1571 	pqm_uninit(&process->pqm);
1572 err_process_pqm_init:
1573 	kfd_event_free_process(process);
1574 err_event_init:
1575 	mutex_destroy(&process->mutex);
1576 	kfree(process);
1577 err_alloc_process:
1578 	return ERR_PTR(err);
1579 }
1580 
1581 struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
1582 							struct kfd_process *p)
1583 {
1584 	int i;
1585 
1586 	for (i = 0; i < p->n_pdds; i++)
1587 		if (p->pdds[i]->dev == dev)
1588 			return p->pdds[i];
1589 
1590 	return NULL;
1591 }
1592 
1593 struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
1594 							struct kfd_process *p)
1595 {
1596 	struct kfd_process_device *pdd = NULL;
1597 
1598 	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
1599 		return NULL;
1600 	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
1601 	if (!pdd)
1602 		return NULL;
1603 
1604 	pdd->dev = dev;
1605 	INIT_LIST_HEAD(&pdd->qpd.queues_list);
1606 	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
1607 	pdd->qpd.dqm = dev->dqm;
1608 	pdd->qpd.pqm = &p->pqm;
1609 	pdd->qpd.evicted = 0;
1610 	pdd->qpd.mapped_gws_queue = false;
1611 	pdd->process = p;
1612 	pdd->bound = PDD_UNBOUND;
1613 	pdd->already_dequeued = false;
1614 	pdd->runtime_inuse = false;
1615 	atomic64_set(&pdd->vram_usage, 0);
1616 	pdd->sdma_past_activity_counter = 0;
1617 	pdd->user_gpu_id = dev->id;
1618 	atomic64_set(&pdd->evict_duration_counter, 0);
1619 
1620 	p->pdds[p->n_pdds++] = pdd;
1621 	if (kfd_dbg_is_per_vmid_supported(pdd->dev))
1622 		pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
1623 							pdd->dev->adev,
1624 							false,
1625 							0);
1626 
1627 	/* Init idr used for memory handle translation */
1628 	idr_init(&pdd->alloc_idr);
1629 
1630 	return pdd;
1631 }
1632 
1633 /**
1634  * kfd_process_device_init_vm - Initialize a VM for a process-device
1635  *
1636  * @pdd: The process-device
1637  * @drm_file: Optional pointer to a DRM file descriptor
1638  *
1639  * If @drm_file is specified, it will be used to acquire the VM from
1640  * that file descriptor. If successful, the @pdd takes ownership of
1641  * the file descriptor.
1642  *
1643  * If @drm_file is NULL, a new VM is created.
1644  *
1645  * Returns 0 on success, -errno on failure.
1646  */
1647 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1648 			       struct file *drm_file)
1649 {
1650 	struct amdgpu_fpriv *drv_priv;
1651 	struct amdgpu_vm *avm;
1652 	struct kfd_process *p;
1653 	struct dma_fence *ef;
1654 	struct kfd_node *dev;
1655 	int ret;
1656 
1657 	if (!drm_file)
1658 		return -EINVAL;
1659 
1660 	if (pdd->drm_priv)
1661 		return -EBUSY;
1662 
1663 	ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
1664 	if (ret)
1665 		return ret;
1666 	avm = &drv_priv->vm;
1667 
1668 	p = pdd->process;
1669 	dev = pdd->dev;
1670 
1671 	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
1672 						     &p->kgd_process_info,
1673 						     p->ef ? NULL : &ef);
1674 	if (ret) {
1675 		dev_err(dev->adev->dev, "Failed to create process VM object\n");
1676 		return ret;
1677 	}
1678 
1679 	if (!p->ef)
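	/* The eviction fence is shared by all GPUs of the process: ef is only
	 * filled in by the first acquire, while p->ef is still NULL.
	 */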
1680 		RCU_INIT_POINTER(p->ef, ef);
1681 
1682 	pdd->drm_priv = drm_file->private_data;
1683 
1684 	ret = kfd_process_device_reserve_ib_mem(pdd);
1685 	if (ret)
1686 		goto err_reserve_ib_mem;
1687 	ret = kfd_process_device_init_cwsr_dgpu(pdd);
1688 	if (ret)
1689 		goto err_init_cwsr;
1690 
1691 	if (unlikely(!avm->pasid)) {
1692 		dev_warn(pdd->dev->adev->dev, "WARN: vm %p has no pasid associated",
1693 				 avm);
1694 		ret = -EINVAL;
1695 		goto err_get_pasid;
1696 	}
1697 
1698 	pdd->pasid = avm->pasid;
1699 	pdd->drm_file = drm_file;
1700 
1701 	return 0;
1702 
1703 err_get_pasid:
1704 	kfd_process_device_destroy_cwsr_dgpu(pdd);
1705 err_init_cwsr:
1706 	kfd_process_device_destroy_ib_mem(pdd);
1707 err_reserve_ib_mem:
1708 	pdd->drm_priv = NULL;
1709 	amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
1710 
1711 	return ret;
1712 }
1713 
1714 /*
1715  * Direct the IOMMU to bind the process (specifically the pasid->mm)
1716  * to the device.
1717  * Unbinding occurs when the process dies or the device is removed.
1718  *
1719  * Assumes that the process lock is held.
1720  */
1721 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
1722 							struct kfd_process *p)
1723 {
1724 	struct kfd_process_device *pdd;
1725 	int err;
1726 
1727 	pdd = kfd_get_process_device_data(dev, p);
1728 	if (!pdd) {
1729 		dev_err(dev->adev->dev, "Process device data doesn't exist\n");
1730 		return ERR_PTR(-ENOMEM);
1731 	}
1732 
1733 	if (!pdd->drm_priv)
1734 		return ERR_PTR(-ENODEV);
1735 
1736 	/*
	 * Signal the runtime-pm system to auto-resume and to prevent
	 * further runtime suspends once the device pdd is created, until
	 * the pdd is destroyed.
1740 	 */
1741 	if (!pdd->runtime_inuse) {
1742 		err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
1743 		if (err < 0) {
1744 			pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1745 			return ERR_PTR(err);
1746 		}
1747 	}
1748 
1749 	/*
	 * Make sure that the runtime usage counter is incremented just once
	 * per pdd.
1752 	 */
1753 	pdd->runtime_inuse = true;
1754 
1755 	return pdd;
1756 }
1757 
1758 /* Create specific handle mapped to mem from process local memory idr
1759  * Assumes that the process lock is held.
1760  */
1761 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1762 					void *mem)
1763 {
1764 	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
1765 }
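
/* Typical flow (a sketch): a memory-allocation ioctl stores the BO with
 * kfd_process_device_create_obj_handle() and returns the handle to user
 * space; later ioctls resolve it back to the BO with
 * kfd_process_device_translate_handle() under the process lock.
 */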
1766 
1767 /* Translate specific handle from process local memory idr
1768  * Assumes that the process lock is held.
1769  */
1770 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
1771 					int handle)
1772 {
1773 	if (handle < 0)
1774 		return NULL;
1775 
1776 	return idr_find(&pdd->alloc_idr, handle);
1777 }
1778 
1779 /* Remove specific handle from process local memory idr
1780  * Assumes that the process lock is held.
1781  */
1782 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1783 					int handle)
1784 {
1785 	if (handle >= 0)
1786 		idr_remove(&pdd->alloc_idr, handle);
1787 }
1788 
1789 static struct kfd_process_device *kfd_lookup_process_device_by_pasid(u32 pasid)
1790 {
1791 	struct kfd_process_device *ret_p = NULL;
1792 	struct kfd_process *p;
1793 	unsigned int temp;
1794 	int i;
1795 
1796 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1797 		for (i = 0; i < p->n_pdds; i++) {
1798 			if (p->pdds[i]->pasid == pasid) {
1799 				ret_p = p->pdds[i];
1800 				break;
1801 			}
1802 		}
1803 		if (ret_p)
1804 			break;
1805 	}
1806 	return ret_p;
1807 }
1808 
1809 /* This increments the process->ref counter. */
1810 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
1811 						struct kfd_process_device **pdd)
1812 {
1813 	struct kfd_process_device *ret_p;
1814 
1815 	int idx = srcu_read_lock(&kfd_processes_srcu);
1816 
1817 	ret_p = kfd_lookup_process_device_by_pasid(pasid);
1818 	if (ret_p) {
1819 		if (pdd)
1820 			*pdd = ret_p;
1821 		kref_get(&ret_p->process->ref);
1822 
1823 		srcu_read_unlock(&kfd_processes_srcu, idx);
1824 		return ret_p->process;
1825 	}
1826 
1827 	srcu_read_unlock(&kfd_processes_srcu, idx);
1828 
1829 	if (pdd)
1830 		*pdd = NULL;
1831 
1832 	return NULL;
1833 }
1834 
1835 /* This increments the process->ref counter. */
1836 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
1837 {
1838 	struct kfd_process *p;
1839 
1840 	int idx = srcu_read_lock(&kfd_processes_srcu);
1841 
1842 	p = find_process_by_mm(mm);
1843 	if (p)
1844 		kref_get(&p->ref);
1845 
1846 	srcu_read_unlock(&kfd_processes_srcu, idx);
1847 
1848 	return p;
1849 }
1850 
1851 /* kfd_process_evict_queues - Evict all user queues of a process
1852  *
1853  * Eviction is reference-counted per process-device. This means multiple
1854  * evictions from different sources can be nested safely.
1855  */
1856 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
1857 {
1858 	int r = 0;
1859 	int i;
1860 	unsigned int n_evicted = 0;
1861 
1862 	for (i = 0; i < p->n_pdds; i++) {
1863 		struct kfd_process_device *pdd = p->pdds[i];
1864 		struct device *dev = pdd->dev->adev->dev;
1865 
1866 		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
1867 					     trigger);
1868 
1869 		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
1870 							    &pdd->qpd);
1871 		/* Eviction returns -EIO if HWS is hung or the ASIC is resetting. In
1872 		 * that case we still mark all queues as evicted to prevent them from
1873 		 * being added back, since they are not actually saved right now.
1874 		 */
1875 		if (r && r != -EIO) {
1876 			dev_err(dev, "Failed to evict process queues\n");
1877 			goto fail;
1878 		}
1879 		n_evicted++;
1882 	}
1883 
1884 	return r;
1885 
1886 fail:
1887 	/* To keep state consistent, roll back partial eviction by
1888 	 * restoring queues
1889 	 */
1890 	for (i = 0; i < p->n_pdds; i++) {
1891 		struct kfd_process_device *pdd = p->pdds[i];
1892 
1893 		if (n_evicted == 0)
1894 			break;
1895 
1896 		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1897 
1898 		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1899 							      &pdd->qpd))
1900 			dev_err(pdd->dev->adev->dev,
1901 				"Failed to restore queues\n");
1902 
1903 		n_evicted--;
1904 	}
1905 
1906 	return r;
1907 }
1908 
1909 /* kfd_process_restore_queues - Restore all user queues of a process */
1910 int kfd_process_restore_queues(struct kfd_process *p)
1911 {
1912 	int r, ret = 0;
1913 	int i;
1914 
1915 	for (i = 0; i < p->n_pdds; i++) {
1916 		struct kfd_process_device *pdd = p->pdds[i];
1917 		struct device *dev = pdd->dev->adev->dev;
1918 
1919 		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1920 
1921 		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1922 							      &pdd->qpd);
1923 		if (r) {
1924 			dev_err(dev, "Failed to restore process queues\n");
1925 			if (!ret)
1926 				ret = r;
1927 		}
1928 	}
1929 
1930 	return ret;
1931 }
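
/* Illustrative sketch (not built): because eviction is reference-counted per
 * process-device, independent triggers can nest; queues only become runnable
 * again after the last restore. The trigger values are assumptions and error
 * handling is elided.
 */
#if 0
static void example_nested_eviction(struct kfd_process *p)
{
	kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
	kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SVM); /* nested */

	kfd_process_restore_queues(p);	/* still evicted: count 2 -> 1 */
	kfd_process_restore_queues(p);	/* runnable again: count 1 -> 0 */
}
#endif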
1932 
1933 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
1934 {
1935 	int i;
1936 
1937 	for (i = 0; i < p->n_pdds; i++)
1938 		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
1939 			return i;
1940 	return -EINVAL;
1941 }
1942 
1943 int
1944 kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
1945 			    uint32_t *gpuid, uint32_t *gpuidx)
1946 {
1947 	int i;
1948 
1949 	for (i = 0; i < p->n_pdds; i++)
1950 		if (p->pdds[i] && p->pdds[i]->dev == node) {
1951 			*gpuid = p->pdds[i]->user_gpu_id;
1952 			*gpuidx = i;
1953 			return 0;
1954 		}
1955 	return -EINVAL;
1956 }
1957 
1958 static int signal_eviction_fence(struct kfd_process *p)
1959 {
1960 	struct dma_fence *ef;
1961 	int ret;
1962 
1963 	rcu_read_lock();
1964 	ef = dma_fence_get_rcu_safe(&p->ef);
1965 	rcu_read_unlock();
1966 	if (!ef)
1967 		return -EINVAL;
1968 
1969 	ret = dma_fence_signal(ef);
1970 	dma_fence_put(ef);
1971 
1972 	return ret;
1973 }
1974 
1975 static void evict_process_worker(struct work_struct *work)
1976 {
1977 	int ret;
1978 	struct kfd_process *p;
1979 	struct delayed_work *dwork;
1980 
1981 	dwork = to_delayed_work(work);
1982 
1983 	/* Process termination destroys this worker thread. So during the
1984 	 * lifetime of this thread, kfd_process p will be valid
1985 	 */
1986 	p = container_of(dwork, struct kfd_process, eviction_work);
1987 
1988 	pr_debug("Started evicting process pid %d\n", p->lead_thread->pid);
1989 	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
1990 	if (!ret) {
1991 		/* If another thread already signaled the eviction fence,
1992 		 * they are responsible for stopping the queues and scheduling
1993 		 * the restore work.
1994 		 */
1995 		if (signal_eviction_fence(p) ||
1996 		    mod_delayed_work(kfd_restore_wq, &p->restore_work,
1997 				     msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
1998 			kfd_process_restore_queues(p);
1999 
2000 		pr_debug("Finished evicting process pid %d\n", p->lead_thread->pid);
2001 	} else
2002 		pr_err("Failed to evict queues of process pid %d\n", p->lead_thread->pid);
2003 }
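
/* Illustrative annotation (not built): one reading of the condition in
 * evict_process_worker() above. dma_fence_signal() fails if the fence was
 * already signaled, and mod_delayed_work() returns true if the restore work
 * was already pending, so either case means another thread owns the
 * eviction/restore cycle and this worker's eviction is undone immediately.
 */
#if 0
	if (signal_eviction_fence(p) ||		/* fence signaled elsewhere */
	    mod_delayed_work(kfd_restore_wq, &p->restore_work,
			     msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
		kfd_process_restore_queues(p);	/* undo this worker's eviction */
#endif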
2004 
2005 static int restore_process_helper(struct kfd_process *p)
2006 {
2007 	int ret = 0;
2008 
2009 	/* VMs may not have been acquired yet during debugging. */
2010 	if (p->kgd_process_info) {
2011 		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
2012 			p->kgd_process_info, &p->ef);
2013 		if (ret)
2014 			return ret;
2015 	}
2016 
2017 	ret = kfd_process_restore_queues(p);
2018 	if (!ret)
2019 		pr_debug("Finished restoring process pid %d\n",
2020 			p->lead_thread->pid);
2021 	else
2022 		pr_err("Failed to restore queues of process pid %d\n",
2023 		      p->lead_thread->pid);
2024 
2025 	return ret;
2026 }
2027 
2028 static void restore_process_worker(struct work_struct *work)
2029 {
2030 	struct delayed_work *dwork;
2031 	struct kfd_process *p;
2032 	int ret = 0;
2033 
2034 	dwork = to_delayed_work(work);
2035 
2036 	/* Process termination destroys this worker thread. So during the
2037 	 * lifetime of this thread, kfd_process p will be valid
2038 	 */
2039 	p = container_of(dwork, struct kfd_process, restore_work);
2040 	pr_debug("Started restoring process pid %d\n", p->lead_thread->pid);
2041 
2042 	/* Set last_restore_timestamp before the restoration attempt rather
2043 	 * than only after it succeeds. Otherwise it would have to be set by
2044 	 * KGD (restore_process_bos) before the KFD BOs are unreserved; if
2045 	 * not, the process could be evicted again before the timestamp is
2046 	 * set. If the restore fails, the timestamp is set again on the next
2047 	 * attempt. This means the minimum GPU quantum is
2048 	 * PROCESS_ACTIVE_TIME_MS minus the time to execute the following
2049 	 * two functions.
2050 	 */
2051 
2052 	p->last_restore_timestamp = get_jiffies_64();
2053 
2054 	ret = restore_process_helper(p);
2055 	if (ret) {
2056 		pr_debug("Failed to restore BOs of process pid %d, retry after %d ms\n",
2057 			 p->lead_thread->pid, PROCESS_BACK_OFF_TIME_MS);
2058 		if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
2059 				     msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)))
2060 			kfd_process_restore_queues(p);
2061 	}
2062 }
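
/* Illustrative sketch (not built): how the guaranteed GPU quantum described
 * in the comment above could be checked against last_restore_timestamp. The
 * helper is hypothetical; only the field and PROCESS_ACTIVE_TIME_MS are from
 * this driver.
 */
#if 0
static bool example_quantum_expired(struct kfd_process *p)
{
	u64 elapsed_ms = jiffies64_to_msecs(get_jiffies_64() -
					    p->last_restore_timestamp);

	/* e.g. if PROCESS_ACTIVE_TIME_MS were 10, a process restored 4 ms
	 * ago would still have roughly 6 ms of guaranteed runtime left
	 */
	return elapsed_ms >= PROCESS_ACTIVE_TIME_MS;
}
#endif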
2063 
2064 void kfd_suspend_all_processes(void)
2065 {
2066 	struct kfd_process *p;
2067 	unsigned int temp;
2068 	int idx = srcu_read_lock(&kfd_processes_srcu);
2069 
2070 	WARN(debug_evictions, "Evicting all processes");
2071 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2072 		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
2073 			pr_err("Failed to suspend process pid %d\n", p->lead_thread->pid);
2074 		signal_eviction_fence(p);
2075 	}
2076 	srcu_read_unlock(&kfd_processes_srcu, idx);
2077 }
2078 
2079 int kfd_resume_all_processes(void)
2080 {
2081 	struct kfd_process *p;
2082 	unsigned int temp;
2083 	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
2084 
2085 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2086 		if (restore_process_helper(p)) {
2087 			pr_err("Restore process pid %d failed during resume\n",
2088 			      p->lead_thread->pid);
2089 			ret = -EFAULT;
2090 		}
2091 	}
2092 	srcu_read_unlock(&kfd_processes_srcu, idx);
2093 	return ret;
2094 }
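
/* Illustrative sketch (not built): the two helpers above are meant to be
 * used as a pair around operations that need all user queues off the
 * hardware, e.g. a device suspend or reset path. The surrounding function is
 * hypothetical.
 */
#if 0
static int example_quiesce_and_resume(void)
{
	kfd_suspend_all_processes();
	/* ... hardware is quiesced: suspend or reset the device here ... */
	return kfd_resume_all_processes();
}
#endif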
2095 
2096 int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
2097 			  struct vm_area_struct *vma)
2098 {
2099 	struct kfd_process_device *pdd;
2100 	struct qcm_process_device *qpd;
2101 
2102 	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
2103 		dev_err(dev->adev->dev, "Incorrect CWSR mapping size.\n");
2104 		return -EINVAL;
2105 	}
2106 
2107 	pdd = kfd_get_process_device_data(dev, process);
2108 	if (!pdd)
2109 		return -EINVAL;
2110 	qpd = &pdd->qpd;
2111 
2112 	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2113 					get_order(KFD_CWSR_TBA_TMA_SIZE));
2114 	if (!qpd->cwsr_kaddr) {
2115 		dev_err(dev->adev->dev,
2116 			"Error allocating per process CWSR buffer.\n");
2117 		return -ENOMEM;
2118 	}
2119 
2120 	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
2121 		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
2122 	/* Mapping pages to user process */
2123 	return remap_pfn_range(vma, vma->vm_start,
2124 			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
2125 			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
2126 }
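
/* Illustrative sketch (not built): the userspace side of the CWSR mapping.
 * The file descriptor and offset are assumptions about the caller, not the
 * actual KFD mmap ABI; only the fixed KFD_CWSR_TBA_TMA_SIZE length checked
 * above is real.
 */
#if 0
	void *cwsr = mmap(NULL, KFD_CWSR_TBA_TMA_SIZE,
			  PROT_READ | PROT_WRITE, MAP_SHARED,
			  kfd_fd, cwsr_mmap_offset);	/* hypothetical values */
#endif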
2127 
2128 /* assumes caller holds process lock. */
2129 int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
2130 {
2131 	uint32_t irq_drain_fence[8];
2132 	uint8_t node_id = 0;
2133 	int r = 0;
2134 
2135 	if (!KFD_IS_SOC15(pdd->dev))
2136 		return 0;
2137 
2138 	pdd->process->irq_drain_is_open = true;
2139 
2140 	memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
2141 	irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
2142 							KFD_IRQ_FENCE_CLIENTID;
2143 	irq_drain_fence[3] = pdd->pasid;
2144 
2145 	/*
2146 	 * For GFX 9.4.3/9.4.4/9.5.0, send the NodeId also in IH cookie DW[3]
2147 	 */
2148 	if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) ||
2149 	    KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4) ||
2150 	    KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 5, 0)) {
2151 		node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
2152 		irq_drain_fence[3] |= node_id << 16;
2153 	}
2154 
2155 	/* Send a drain fence so stale interrupts already scheduled to KFD are flushed. */
2156 	if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
2157 						     irq_drain_fence)) {
2158 		pdd->process->irq_drain_is_open = false;
2159 		return 0;
2160 	}
2161 
2162 	r = wait_event_interruptible(pdd->process->wait_irq_drain,
2163 				     !READ_ONCE(pdd->process->irq_drain_is_open));
2164 	if (r)
2165 		pdd->process->irq_drain_is_open = false;
2166 
2167 	return r;
2168 }
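
/* Illustrative annotation (not built): the drain-fence cookie layout implied
 * by the packing in kfd_process_drain_interrupts() above, assuming PASIDs
 * fit in 16 bits:
 *
 *   irq_drain_fence[0]: bits [15:8]  = KFD_IRQ_FENCE_SOURCEID
 *                       bits [7:0]   = KFD_IRQ_FENCE_CLIENTID
 *   irq_drain_fence[3]: bits [15:0]  = PASID
 *                       bits [23:16] = node_id (GFX 9.4.3/9.4.4/9.5.0 only)
 */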
2169 
2170 void kfd_process_close_interrupt_drain(unsigned int pasid)
2171 {
2172 	struct kfd_process *p;
2173 
2174 	p = kfd_lookup_process_by_pasid(pasid, NULL);
2175 
2176 	if (!p)
2177 		return;
2178 
2179 	WRITE_ONCE(p->irq_drain_is_open, false);
2180 	wake_up_all(&p->wait_irq_drain);
2181 	kfd_unref_process(p);
2182 }
2183 
2184 struct send_exception_work_handler_workarea {
2185 	struct work_struct work;
2186 	struct kfd_process *p;
2187 	unsigned int queue_id;
2188 	uint64_t error_reason;
2189 };
2190 
2191 static void send_exception_work_handler(struct work_struct *work)
2192 {
2193 	struct send_exception_work_handler_workarea *workarea;
2194 	struct kfd_process *p;
2195 	struct queue *q;
2196 	struct mm_struct *mm;
2197 	struct kfd_context_save_area_header __user *csa_header;
2198 	uint64_t __user *err_payload_ptr;
2199 	uint64_t cur_err;
2200 	uint32_t ev_id;
2201 
2202 	workarea = container_of(work,
2203 				struct send_exception_work_handler_workarea,
2204 				work);
2205 	p = workarea->p;
2206 
2207 	mm = get_task_mm(p->lead_thread);
2208 
2209 	if (!mm)
2210 		return;
2211 
2212 	kthread_use_mm(mm);
2213 
2214 	q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
2215 
2216 	if (!q)
2217 		goto out;
2218 
2219 	csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
2220 
2221 	get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
2222 	get_user(cur_err, err_payload_ptr);
2223 	cur_err |= workarea->error_reason;
2224 	put_user(cur_err, err_payload_ptr);
2225 	get_user(ev_id, &csa_header->err_event_id);
2226 
2227 	kfd_set_event(p, ev_id);
2228 
2229 out:
2230 	kthread_unuse_mm(mm);
2231 	mmput(mm);
2232 }
2233 
2234 int kfd_send_exception_to_runtime(struct kfd_process *p,
2235 			unsigned int queue_id,
2236 			uint64_t error_reason)
2237 {
2238 	struct send_exception_work_handler_workarea worker;
2239 
2240 	INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
2241 
2242 	worker.p = p;
2243 	worker.queue_id = queue_id;
2244 	worker.error_reason = error_reason;
2245 
2246 	schedule_work(&worker.work);
2247 	flush_work(&worker.work);
2248 	destroy_work_on_stack(&worker.work);
2249 
2250 	return 0;
2251 }
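
/* Illustrative annotation (not built): kfd_send_exception_to_runtime() runs
 * its handler synchronously via an on-stack work item: INIT_WORK_ONSTACK()
 * initializes it, flush_work() waits for completion while the stack frame is
 * still live, and destroy_work_on_stack() cleans up debug-object state. The
 * worker context lets the handler borrow the lead thread's address space
 * with kthread_use_mm(), which could not be done from the caller's context
 * in general.
 */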
2252 
2253 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
2254 {
2255 	int i;
2256 
2257 	if (gpu_id) {
2258 		for (i = 0; i < p->n_pdds; i++) {
2259 			struct kfd_process_device *pdd = p->pdds[i];
2260 
2261 			if (pdd->user_gpu_id == gpu_id)
2262 				return pdd;
2263 		}
2264 	}
2265 	return NULL;
2266 }
2267 
2268 int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
2269 {
2270 	int i;
2271 
2272 	if (!actual_gpu_id)
2273 		return 0;
2274 
2275 	for (i = 0; i < p->n_pdds; i++) {
2276 		struct kfd_process_device *pdd = p->pdds[i];
2277 
2278 		if (pdd->dev->id == actual_gpu_id)
2279 			return pdd->user_gpu_id;
2280 	}
2281 	return -EINVAL;
2282 }
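
/* Illustrative sketch (not built): user_gpu_id (the id the process knows,
 * e.g. preserved across a CRIU checkpoint/restore) and the actual device id
 * can be translated in both directions with the two helpers above. The
 * surrounding function is hypothetical.
 */
#if 0
static void example_gpu_id_round_trip(struct kfd_process *p, uint32_t user_gpu_id)
{
	struct kfd_process_device *pdd =
		kfd_process_device_data_by_id(p, user_gpu_id);

	/* map the device's real id back to the id userspace uses */
	if (pdd)
		WARN_ON(kfd_process_get_user_gpu_id(p, pdd->dev->id) !=
			user_gpu_id);
}
#endif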
2283 
2284 #if defined(CONFIG_DEBUG_FS)
2285 
2286 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
2287 {
2288 	struct kfd_process *p;
2289 	unsigned int temp;
2290 	int r = 0;
2291 
2292 	int idx = srcu_read_lock(&kfd_processes_srcu);
2293 
2294 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2295 		seq_printf(m, "Process %d PID %d:\n",
2296 			   p->lead_thread->tgid, p->lead_thread->pid);
2297 
2298 		mutex_lock(&p->mutex);
2299 		r = pqm_debugfs_mqds(m, &p->pqm);
2300 		mutex_unlock(&p->mutex);
2301 
2302 		if (r)
2303 			break;
2304 	}
2305 
2306 	srcu_read_unlock(&kfd_processes_srcu, idx);
2307 
2308 	return r;
2309 }
2310 
2311 #endif
2312