xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c (revision 1697398555f69b31e939e070b304292513d4c9ff)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drm_auth.h>
26 #include <drm/drm_exec.h>
27 #include <linux/pm_runtime.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_userq.h"
32 #include "amdgpu_userq_fence.h"
33 
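/**
 * amdgpu_userq_get_supported_ip_mask - report which HW IPs support user queues
 * @adev: amdgpu device pointer
 *
 * Returns a bitmask with bit N set when adev->userq_funcs[N] is populated,
 * i.e. when usermode queues are available for HW IP type N.
 */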
34 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
35 {
36 	int i;
37 	u32 userq_ip_mask = 0;
38 
39 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
40 		if (adev->userq_funcs[i])
41 			userq_ip_mask |= (1 << i);
42 	}
43 
44 	return userq_ip_mask;
45 }
46 
47 static int
48 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
49 			  struct amdgpu_usermode_queue *queue)
50 {
51 	struct amdgpu_device *adev = uq_mgr->adev;
52 	const struct amdgpu_userq_funcs *userq_funcs =
53 		adev->userq_funcs[queue->queue_type];
54 	int r = 0;
55 
56 	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
57 		r = userq_funcs->unmap(uq_mgr, queue);
58 		if (r)
59 			queue->state = AMDGPU_USERQ_STATE_HUNG;
60 		else
61 			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
62 	}
63 	return r;
64 }
65 
66 static int
67 amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
68 			struct amdgpu_usermode_queue *queue)
69 {
70 	struct amdgpu_device *adev = uq_mgr->adev;
71 	const struct amdgpu_userq_funcs *userq_funcs =
72 		adev->userq_funcs[queue->queue_type];
73 	int r = 0;
74 
75 	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
76 		r = userq_funcs->map(uq_mgr, queue);
77 		if (r) {
78 			queue->state = AMDGPU_USERQ_STATE_HUNG;
79 		} else {
80 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
81 		}
82 	}
83 	return r;
84 }
85 
86 static void
87 amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
88 				 struct amdgpu_usermode_queue *queue)
89 {
90 	struct dma_fence *f = queue->last_fence;
91 	int ret;
92 
93 	if (f && !dma_fence_is_signaled(f)) {
94 		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
95 		if (ret <= 0)
96 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
97 				     f->context, f->seqno);
98 	}
99 }
100 
101 static void
102 amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
103 		     struct amdgpu_usermode_queue *queue,
104 		     int queue_id)
105 {
106 	struct amdgpu_device *adev = uq_mgr->adev;
107 	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
108 
109 	uq_funcs->mqd_destroy(uq_mgr, queue);
110 	amdgpu_userq_fence_driver_free(queue);
111 	idr_remove(&uq_mgr->userq_idr, queue_id);
112 	kfree(queue);
113 }
114 
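/**
 * amdgpu_userq_active - count the mapped queues of a process
 * @uq_mgr: per-process user queue manager
 *
 * Walks the queue IDR under the manager lock and returns the number of
 * queues that are currently in the AMDGPU_USERQ_STATE_MAPPED state.
 */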
115 int
116 amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
117 {
118 	struct amdgpu_usermode_queue *queue;
119 	int queue_id;
120 	int ret = 0;
121 
122 	mutex_lock(&uq_mgr->userq_mutex);
123 	/* Count the queues of this process that are currently mapped */
124 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
125 		ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
126 
127 	mutex_unlock(&uq_mgr->userq_mutex);
128 	return ret;
129 }
130 
131 static struct amdgpu_usermode_queue *
132 amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
133 {
134 	return idr_find(&uq_mgr->userq_idr, qid);
135 }
136 
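/**
 * amdgpu_userq_ensure_ev_fence - make sure a valid eviction fence exists
 * @uq_mgr: per-process user queue manager
 * @evf_mgr: eviction fence manager of the same process
 *
 * Flushes any pending resume work and retries until an unsignaled eviction
 * fence is available. Returns with uq_mgr->userq_mutex held.
 */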
137 void
138 amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
139 			     struct amdgpu_eviction_fence_mgr *evf_mgr)
140 {
141 	struct amdgpu_eviction_fence *ev_fence;
142 
143 retry:
144 	/* Flush any pending resume work to create ev_fence */
145 	flush_delayed_work(&uq_mgr->resume_work);
146 
147 	mutex_lock(&uq_mgr->userq_mutex);
148 	spin_lock(&evf_mgr->ev_fence_lock);
149 	ev_fence = evf_mgr->ev_fence;
150 	spin_unlock(&evf_mgr->ev_fence_lock);
151 	if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
152 		mutex_unlock(&uq_mgr->userq_mutex);
153 		/*
154 		 * Looks like there was no pending resume work,
155 		 * add one now to create a valid eviction fence
156 		 */
157 		schedule_delayed_work(&uq_mgr->resume_work, 0);
158 		goto retry;
159 	}
160 }
161 
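/**
 * amdgpu_userq_create_object - allocate a kernel BO for user queue metadata
 * @uq_mgr: per-process user queue manager
 * @userq_obj: object to fill in with the BO, GPU address and CPU pointer
 * @size: size of the allocation in bytes
 *
 * Creates a CPU accessible GTT BO, binds it to GART, kmaps it and zeroes
 * its contents. Returns 0 on success or a negative error code.
 */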
162 int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
163 			       struct amdgpu_userq_obj *userq_obj,
164 			       int size)
165 {
166 	struct amdgpu_device *adev = uq_mgr->adev;
167 	struct amdgpu_bo_param bp;
168 	int r;
169 
170 	memset(&bp, 0, sizeof(bp));
171 	bp.byte_align = PAGE_SIZE;
172 	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
173 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
174 		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
175 	bp.type = ttm_bo_type_kernel;
176 	bp.size = size;
177 	bp.resv = NULL;
178 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
179 
180 	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
181 	if (r) {
182 		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
183 		return r;
184 	}
185 
186 	r = amdgpu_bo_reserve(userq_obj->obj, true);
187 	if (r) {
188 		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
189 		goto free_obj;
190 	}
191 
192 	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
193 	if (r) {
194 		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
195 		goto unresv;
196 	}
197 
198 	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
199 	if (r) {
200 		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
201 		goto unresv;
202 	}
203 
204 	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
205 	amdgpu_bo_unreserve(userq_obj->obj);
206 	memset(userq_obj->cpu_ptr, 0, size);
207 	return 0;
208 
209 unresv:
210 	amdgpu_bo_unreserve(userq_obj->obj);
211 
212 free_obj:
213 	amdgpu_bo_unref(&userq_obj->obj);
214 	return r;
215 }
216 
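/**
 * amdgpu_userq_destroy_object - unmap and release a user queue object
 * @uq_mgr: per-process user queue manager
 * @userq_obj: object previously created by amdgpu_userq_create_object()
 */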
217 void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
218 				 struct amdgpu_userq_obj *userq_obj)
219 {
220 	amdgpu_bo_kunmap(userq_obj->obj);
221 	amdgpu_bo_unref(&userq_obj->obj);
222 }
223 
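/**
 * amdgpu_userq_get_doorbell_index - translate a doorbell offset to a BAR index
 * @uq_mgr: per-process user queue manager
 * @db_info: doorbell handle, offset and queue type of the new queue
 * @filp: DRM file used to look up the doorbell GEM object
 *
 * Pins the doorbell BO (unpinned again on queue destroy) and converts the
 * queue relative doorbell offset into an absolute index on the doorbell BAR.
 * Returns the index on success or a negative error code on failure.
 */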
224 uint64_t
225 amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
226 				struct amdgpu_db_info *db_info,
227 				struct drm_file *filp)
228 {
229 	uint64_t index;
230 	struct drm_gem_object *gobj;
231 	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
232 	int r, db_size;
233 
234 	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
235 	if (gobj == NULL) {
236 		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
237 		return -EINVAL;
238 	}
239 
240 	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
241 	drm_gem_object_put(gobj);
242 
243 	r = amdgpu_bo_reserve(db_obj->obj, true);
244 	if (r) {
245 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
246 		goto unref_bo;
247 	}
248 
249 	/* Pin the BO before generating the index, unpin in queue destroy */
250 	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
251 	if (r) {
252 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
253 		goto unresv_bo;
254 	}
255 
256 	switch (db_info->queue_type) {
257 	case AMDGPU_HW_IP_GFX:
258 	case AMDGPU_HW_IP_COMPUTE:
259 	case AMDGPU_HW_IP_DMA:
260 		db_size = sizeof(u64);
261 		break;
262 
263 	case AMDGPU_HW_IP_VCN_ENC:
264 		db_size = sizeof(u32);
265 		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
266 		break;
267 
268 	case AMDGPU_HW_IP_VPE:
269 		db_size = sizeof(u32);
270 		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
271 		break;
272 
273 	default:
274 		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
275 			     db_info->queue_type);
276 		r = -EINVAL;
277 		goto unpin_bo;
278 	}
279 
280 	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
281 					     db_info->doorbell_offset, db_size);
282 	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
283 		       "[Usermode queues] doorbell index=%lld\n", index);
284 	amdgpu_bo_unreserve(db_obj->obj);
285 	return index;
286 
287 unpin_bo:
288 	amdgpu_bo_unpin(db_obj->obj);
289 unresv_bo:
290 	amdgpu_bo_unreserve(db_obj->obj);
291 unref_bo:
292 	amdgpu_bo_unref(&db_obj->obj);
293 	return r;
294 }
295 
296 static int
297 amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
298 {
299 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
300 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
301 	struct amdgpu_device *adev = uq_mgr->adev;
302 	struct amdgpu_usermode_queue *queue;
303 	int r = 0;
304 
305 	cancel_delayed_work_sync(&uq_mgr->resume_work);
306 	mutex_lock(&uq_mgr->userq_mutex);
307 
308 	queue = amdgpu_userq_find(uq_mgr, queue_id);
309 	if (!queue) {
310 		drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
311 		mutex_unlock(&uq_mgr->userq_mutex);
312 		return -EINVAL;
313 	}
314 	amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
315 	r = amdgpu_bo_reserve(queue->db_obj.obj, true);
316 	if (!r) {
317 		amdgpu_bo_unpin(queue->db_obj.obj);
318 		amdgpu_bo_unreserve(queue->db_obj.obj);
319 	}
320 	amdgpu_bo_unref(&queue->db_obj.obj);
321 
322 #if defined(CONFIG_DEBUG_FS)
323 	debugfs_remove_recursive(queue->debugfs_queue);
324 #endif
325 	r = amdgpu_userq_unmap_helper(uq_mgr, queue);
326 	amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
327 	mutex_unlock(&uq_mgr->userq_mutex);
328 
329 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
330 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
331 
332 	return r;
333 }
334 
335 static int amdgpu_userq_priority_permit(struct drm_file *filp,
336 					int priority)
337 {
338 	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
339 		return 0;
340 
341 	if (capable(CAP_SYS_NICE))
342 		return 0;
343 
344 	if (drm_is_current_master(filp))
345 		return 0;
346 
347 	return -EACCES;
348 }
349 
350 #if defined(CONFIG_DEBUG_FS)
351 static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
352 {
353 	struct amdgpu_usermode_queue *queue = m->private;
354 	struct amdgpu_bo *bo;
355 	int r;
356 
357 	if (!queue || !queue->mqd.obj)
358 		return -EINVAL;
359 
360 	bo = amdgpu_bo_ref(queue->mqd.obj);
361 	r = amdgpu_bo_reserve(bo, true);
362 	if (r) {
363 		amdgpu_bo_unref(&bo);
364 		return -EINVAL;
365 	}
366 
367 	seq_printf(m, "queue_type: %d\n", queue->queue_type);
368 	seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
369 
370 	amdgpu_bo_unreserve(bo);
371 	amdgpu_bo_unref(&bo);
372 
373 	return 0;
374 }
375 
376 static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
377 {
378 	return single_open(file, amdgpu_mqd_info_read, inode->i_private);
379 }
380 
381 static const struct file_operations amdgpu_mqd_info_fops = {
382 	.owner = THIS_MODULE,
383 	.open = amdgpu_mqd_info_open,
384 	.read = seq_read,
385 	.llseek = seq_lseek,
386 	.release = single_release,
387 };
388 #endif
389 
390 static int
391 amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
392 {
393 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
394 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
395 	struct amdgpu_device *adev = uq_mgr->adev;
396 	const struct amdgpu_userq_funcs *uq_funcs;
397 	struct amdgpu_usermode_queue *queue;
398 	struct amdgpu_db_info db_info;
399 	char *queue_name;
400 	bool skip_map_queue;
401 	uint64_t index;
402 	int qid, r = 0;
403 	int priority =
404 		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
405 		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
406 
407 	r = amdgpu_userq_priority_permit(filp, priority);
408 	if (r)
409 		return r;
410 
411 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
412 	if (r < 0) {
413 		drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
414 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
415 		return r;
416 	}
417 
418 	/*
419 	 * There could be a situation where we are creating a new queue while
420 	 * the other queues under this UQ_mgr are suspended. So if there is any
421 	 * resume work pending, wait for it to get done.
422 	 *
423 	 * This will also make sure we have a valid eviction fence ready to be used.
424 	 */
425 	mutex_lock(&adev->userq_mutex);
426 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
427 
428 	uq_funcs = adev->userq_funcs[args->in.ip_type];
429 	if (!uq_funcs) {
430 		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
431 			     args->in.ip_type);
432 		r = -EINVAL;
433 		goto unlock;
434 	}
435 
436 	queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
437 	if (!queue) {
438 		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
439 		r = -ENOMEM;
440 		goto unlock;
441 	}
442 	queue->doorbell_handle = args->in.doorbell_handle;
443 	queue->queue_type = args->in.ip_type;
444 	queue->vm = &fpriv->vm;
445 	queue->priority = priority;
446 
447 	db_info.queue_type = queue->queue_type;
448 	db_info.doorbell_handle = queue->doorbell_handle;
449 	db_info.db_obj = &queue->db_obj;
450 	db_info.doorbell_offset = args->in.doorbell_offset;
451 
452 	/* Convert relative doorbell offset into absolute doorbell index */
453 	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
454 	if (index == (uint64_t)-EINVAL) {
455 		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
456 		kfree(queue);
457 		r = -EINVAL;
458 		goto unlock;
459 	}
460 
461 	queue->doorbell_index = index;
462 	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
463 	r = amdgpu_userq_fence_driver_alloc(adev, queue);
464 	if (r) {
465 		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
466 		goto unlock;
467 	}
468 
469 	r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
470 	if (r) {
471 		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
472 		amdgpu_userq_fence_driver_free(queue);
473 		kfree(queue);
474 		goto unlock;
475 	}
476 
477 
478 	qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
479 	if (qid < 0) {
480 		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
481 		amdgpu_userq_fence_driver_free(queue);
482 		uq_funcs->mqd_destroy(uq_mgr, queue);
483 		kfree(queue);
484 		r = -ENOMEM;
485 		goto unlock;
486 	}
487 
488 	/* don't map the queue if scheduling is halted */
489 	if (adev->userq_halt_for_enforce_isolation &&
490 	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
491 	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
492 		skip_map_queue = true;
493 	else
494 		skip_map_queue = false;
495 	if (!skip_map_queue) {
496 		r = amdgpu_userq_map_helper(uq_mgr, queue);
497 		if (r) {
498 			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
499 			idr_remove(&uq_mgr->userq_idr, qid);
500 			amdgpu_userq_fence_driver_free(queue);
501 			uq_funcs->mqd_destroy(uq_mgr, queue);
502 			kfree(queue);
503 			goto unlock;
504 		}
505 	}
506 
507 	queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
508 	if (!queue_name) {
509 		r = -ENOMEM;
510 		goto unlock;
511 	}
512 
513 #if defined(CONFIG_DEBUG_FS)
514 	/* Queue dentry per client to hold MQD information */
515 	queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
516 	debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
517 #endif
518 	kfree(queue_name);
519 
520 	args->out.queue_id = qid;
521 
522 unlock:
523 	mutex_unlock(&uq_mgr->userq_mutex);
524 	mutex_unlock(&adev->userq_mutex);
525 
526 	return r;
527 }
528 
529 static int amdgpu_userq_input_args_validate(struct drm_device *dev,
530 					union drm_amdgpu_userq *args,
531 					struct drm_file *filp)
532 {
533 	struct amdgpu_device *adev = drm_to_adev(dev);
534 
535 	switch (args->in.op) {
536 	case AMDGPU_USERQ_OP_CREATE:
537 		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
538 				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
539 			return -EINVAL;
540 		/* Usermode queues are only supported for GFX, compute and SDMA IPs for now */
541 		if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
542 		    args->in.ip_type != AMDGPU_HW_IP_DMA &&
543 		    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
544 			drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
545 				     args->in.ip_type);
546 			return -EINVAL;
547 		}
548 
549 		if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
550 		    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
551 		    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
552 		    !amdgpu_is_tmz(adev)) {
553 			drm_file_err(filp, "Secure queues are only supported on GFX/Compute IPs\n");
554 			return -EINVAL;
555 		}
556 
557 		if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
558 		    args->in.queue_va == 0 ||
559 		    args->in.queue_size == 0) {
560 			drm_file_err(filp, "invalid userq queue va or size\n");
561 			return -EINVAL;
562 		}
563 		if (!args->in.wptr_va || !args->in.rptr_va) {
564 			drm_file_err(filp, "invalid userq queue rptr or wptr\n");
565 			return -EINVAL;
566 		}
567 		break;
568 	case AMDGPU_USERQ_OP_FREE:
569 		if (args->in.ip_type ||
570 		    args->in.doorbell_handle ||
571 		    args->in.doorbell_offset ||
572 		    args->in.flags ||
573 		    args->in.queue_va ||
574 		    args->in.queue_size ||
575 		    args->in.rptr_va ||
576 		    args->in.wptr_va ||
577 		    args->in.mqd ||
578 		    args->in.mqd_size)
579 			return -EINVAL;
580 		break;
581 	default:
582 		return -EINVAL;
583 	}
584 
585 	return 0;
586 }
587 
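/**
 * amdgpu_userq_ioctl - DRM_AMDGPU_USERQ ioctl entry point
 * @dev: DRM device
 * @data: union drm_amdgpu_userq arguments from userspace
 * @filp: DRM file of the calling process
 *
 * Validates the input arguments and dispatches AMDGPU_USERQ_OP_CREATE and
 * AMDGPU_USERQ_OP_FREE to the create and destroy helpers above.
 */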
588 int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
589 		       struct drm_file *filp)
590 {
591 	union drm_amdgpu_userq *args = data;
592 	int r;
593 
594 	if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
595 		return -EINVAL;
596 
597 	switch (args->in.op) {
598 	case AMDGPU_USERQ_OP_CREATE:
599 		r = amdgpu_userq_create(filp, args);
600 		if (r)
601 			drm_file_err(filp, "Failed to create usermode queue\n");
602 		break;
603 
604 	case AMDGPU_USERQ_OP_FREE:
605 		r = amdgpu_userq_destroy(filp, args->in.queue_id);
606 		if (r)
607 			drm_file_err(filp, "Failed to destroy usermode queue\n");
608 		break;
609 
610 	default:
611 		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
612 		return -EINVAL;
613 	}
614 
615 	return r;
616 }
617 
618 static int
619 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
620 {
621 	struct amdgpu_usermode_queue *queue;
622 	int queue_id;
623 	int ret = 0, r;
624 
625 	/* Resume all the queues for this process */
626 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
627 		r = amdgpu_userq_map_helper(uq_mgr, queue);
628 		if (r)
629 			ret = r;
630 	}
631 
632 	if (ret)
633 		drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
634 	return ret;
635 }
636 
637 static int
638 amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
639 {
640 	struct ttm_operation_ctx ctx = { false, false };
641 	int ret;
642 
643 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
644 
645 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
646 	if (ret)
647 		DRM_ERROR("Failed to validate\n");
648 
649 	return ret;
650 }
651 
652 static int
653 amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
654 {
655 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
656 	struct amdgpu_vm *vm = &fpriv->vm;
657 	struct amdgpu_device *adev = uq_mgr->adev;
658 	struct amdgpu_bo_va *bo_va;
659 	struct ww_acquire_ctx *ticket;
660 	struct drm_exec exec;
661 	struct amdgpu_bo *bo;
662 	struct dma_resv *resv;
663 	bool clear, unlock;
664 	int ret = 0;
665 
666 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
667 	drm_exec_until_all_locked(&exec) {
668 		ret = amdgpu_vm_lock_pd(vm, &exec, 2);
669 		drm_exec_retry_on_contention(&exec);
670 		if (unlikely(ret)) {
671 			drm_file_err(uq_mgr->file, "Failed to lock PD\n");
672 			goto unlock_all;
673 		}
674 
675 		/* Lock the done list */
676 		list_for_each_entry(bo_va, &vm->done, base.vm_status) {
677 			bo = bo_va->base.bo;
678 			if (!bo)
679 				continue;
680 
681 			ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
682 			drm_exec_retry_on_contention(&exec);
683 			if (unlikely(ret))
684 				goto unlock_all;
685 		}
686 	}
687 
688 	spin_lock(&vm->status_lock);
689 	while (!list_empty(&vm->moved)) {
690 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
691 					 base.vm_status);
692 		spin_unlock(&vm->status_lock);
693 
694 		/* Per VM BOs never need to be cleared in the page tables */
695 		ret = amdgpu_vm_bo_update(adev, bo_va, false);
696 		if (ret)
697 			goto unlock_all;
698 		spin_lock(&vm->status_lock);
699 	}
700 
701 	ticket = &exec.ticket;
702 	while (!list_empty(&vm->invalidated)) {
703 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
704 					 base.vm_status);
705 		resv = bo_va->base.bo->tbo.base.resv;
706 		spin_unlock(&vm->status_lock);
707 
708 		bo = bo_va->base.bo;
709 		ret = amdgpu_userq_validate_vm_bo(NULL, bo);
710 		if (ret) {
711 			drm_file_err(uq_mgr->file, "Failed to validate BO\n");
712 			goto unlock_all;
713 		}
714 
715 		/* Try to reserve the BO to avoid clearing its ptes */
716 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
717 			clear = false;
718 			unlock = true;
719 		/* The caller is already holding the reservation lock */
720 		} else if (dma_resv_locking_ctx(resv) == ticket) {
721 			clear = false;
722 			unlock = false;
723 		/* Somebody else is using the BO right now */
724 		} else {
725 			clear = true;
726 			unlock = false;
727 		}
728 
729 		ret = amdgpu_vm_bo_update(adev, bo_va, clear);
730 
731 		if (unlock)
732 			dma_resv_unlock(resv);
733 		if (ret)
734 			goto unlock_all;
735 
736 		spin_lock(&vm->status_lock);
737 	}
738 	spin_unlock(&vm->status_lock);
739 
740 	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
741 	if (ret)
742 		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
743 
744 unlock_all:
745 	drm_exec_fini(&exec);
746 	return ret;
747 }
748 
749 static void amdgpu_userq_restore_worker(struct work_struct *work)
750 {
751 	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
752 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
753 	int ret;
754 
755 	flush_delayed_work(&fpriv->evf_mgr.suspend_work);
756 
757 	mutex_lock(&uq_mgr->userq_mutex);
758 
759 	ret = amdgpu_userq_validate_bos(uq_mgr);
760 	if (ret) {
761 		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
762 		goto unlock;
763 	}
764 
765 	ret = amdgpu_userq_restore_all(uq_mgr);
766 	if (ret) {
767 		drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
768 		goto unlock;
769 	}
770 
771 unlock:
772 	mutex_unlock(&uq_mgr->userq_mutex);
773 }
774 
775 static int
776 amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
777 {
778 	struct amdgpu_usermode_queue *queue;
779 	int queue_id;
780 	int ret = 0, r;
781 
782 	/* Try to unmap all the queues in this process ctx */
783 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
784 		r = amdgpu_userq_unmap_helper(uq_mgr, queue);
785 		if (r)
786 			ret = r;
787 	}
788 
789 	if (ret)
790 		drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
791 	return ret;
792 }
793 
794 static int
795 amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
796 {
797 	struct amdgpu_usermode_queue *queue;
798 	int queue_id, ret;
799 
800 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
801 		struct dma_fence *f = queue->last_fence;
802 
803 		if (!f || dma_fence_is_signaled(f))
804 			continue;
805 		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
806 		if (ret <= 0) {
807 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
808 				     f->context, f->seqno);
809 			return -ETIMEDOUT;
810 		}
811 	}
812 
813 	return 0;
814 }
815 
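/**
 * amdgpu_userq_evict - evict all user queues of a process
 * @uq_mgr: per-process user queue manager
 * @ev_fence: eviction fence to signal once the queues are unmapped
 *
 * Waits for the last fence of every queue, unmaps all queues and signals the
 * current eviction fence. Unless the file is being closed, resume work is
 * scheduled so the queues get mapped again after the eviction.
 */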
816 void
817 amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
818 		   struct amdgpu_eviction_fence *ev_fence)
819 {
820 	int ret;
821 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
822 	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
823 
824 	/* Wait for any pending userqueue fence work to finish */
825 	ret = amdgpu_userq_wait_for_signal(uq_mgr);
826 	if (ret) {
827 		drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
828 		return;
829 	}
830 
831 	ret = amdgpu_userq_evict_all(uq_mgr);
832 	if (ret) {
833 		drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
834 		return;
835 	}
836 
837 	/* Signal current eviction fence */
838 	amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
839 
840 	if (evf_mgr->fd_closing) {
841 		cancel_delayed_work_sync(&uq_mgr->resume_work);
842 		return;
843 	}
844 
845 	/* Schedule a resume work */
846 	schedule_delayed_work(&uq_mgr->resume_work, 0);
847 }
848 
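/**
 * amdgpu_userq_mgr_init - initialize a per-process user queue manager
 * @userq_mgr: manager to initialize
 * @file_priv: DRM file this manager belongs to
 * @adev: amdgpu device pointer
 *
 * Sets up the queue IDR, the lock and the resume work, and links the manager
 * into the device wide manager list.
 */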
849 int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
850 			  struct amdgpu_device *adev)
851 {
852 	mutex_init(&userq_mgr->userq_mutex);
853 	idr_init_base(&userq_mgr->userq_idr, 1);
854 	userq_mgr->adev = adev;
855 	userq_mgr->file = file_priv;
856 
857 	mutex_lock(&adev->userq_mutex);
858 	list_add(&userq_mgr->list, &adev->userq_mgr_list);
859 	mutex_unlock(&adev->userq_mutex);
860 
861 	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
862 	return 0;
863 }
864 
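/**
 * amdgpu_userq_mgr_fini - tear down a per-process user queue manager
 * @userq_mgr: manager to destroy
 *
 * Waits for outstanding fences, unmaps and frees every remaining queue and
 * removes the manager from the device wide manager list.
 */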
865 void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
866 {
867 	struct amdgpu_device *adev = userq_mgr->adev;
868 	struct amdgpu_usermode_queue *queue;
869 	struct amdgpu_userq_mgr *uqm, *tmp;
870 	uint32_t queue_id;
871 
872 	cancel_delayed_work_sync(&userq_mgr->resume_work);
873 
874 	mutex_lock(&adev->userq_mutex);
875 	mutex_lock(&userq_mgr->userq_mutex);
876 	idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
877 		amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
878 		amdgpu_userq_unmap_helper(userq_mgr, queue);
879 		amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
880 	}
881 
882 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
883 		if (uqm == userq_mgr) {
884 			list_del(&uqm->list);
885 			break;
886 		}
887 	}
888 	idr_destroy(&userq_mgr->userq_idr);
889 	mutex_unlock(&userq_mgr->userq_mutex);
890 	mutex_unlock(&adev->userq_mutex);
891 	mutex_destroy(&userq_mgr->userq_mutex);
892 }
893 
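/**
 * amdgpu_userq_suspend - unmap all user queues on the device
 * @adev: amdgpu device pointer
 *
 * Called on suspend; unmaps every queue of every process. Returns 0 on
 * success or the last error encountered while unmapping.
 */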
894 int amdgpu_userq_suspend(struct amdgpu_device *adev)
895 {
896 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
897 	struct amdgpu_usermode_queue *queue;
898 	struct amdgpu_userq_mgr *uqm, *tmp;
899 	int queue_id;
900 	int ret = 0, r;
901 
902 	if (!ip_mask)
903 		return 0;
904 
905 	mutex_lock(&adev->userq_mutex);
906 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
907 		cancel_delayed_work_sync(&uqm->resume_work);
908 		mutex_lock(&uqm->userq_mutex);
909 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
910 			r = amdgpu_userq_unmap_helper(uqm, queue);
911 			if (r)
912 				ret = r;
913 		}
914 		mutex_unlock(&uqm->userq_mutex);
915 	}
916 	mutex_unlock(&adev->userq_mutex);
917 	return ret;
918 }
919 
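/**
 * amdgpu_userq_resume - map all user queues on the device again
 * @adev: amdgpu device pointer
 *
 * Counterpart of amdgpu_userq_suspend(), called on resume.
 */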
920 int amdgpu_userq_resume(struct amdgpu_device *adev)
921 {
922 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
923 	struct amdgpu_usermode_queue *queue;
924 	struct amdgpu_userq_mgr *uqm, *tmp;
925 	int queue_id;
926 	int ret = 0, r;
927 
928 	if (!ip_mask)
929 		return 0;
930 
931 	mutex_lock(&adev->userq_mutex);
932 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
933 		mutex_lock(&uqm->userq_mutex);
934 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
935 			r = amdgpu_userq_map_helper(uqm, queue);
936 			if (r)
937 				ret = r;
938 		}
939 		mutex_unlock(&uqm->userq_mutex);
940 	}
941 	mutex_unlock(&adev->userq_mutex);
942 	return ret;
943 }
944 
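/**
 * amdgpu_userq_stop_sched_for_enforce_isolation - halt GFX/compute user queues
 * @adev: amdgpu device pointer
 * @idx: XCP partition whose queues should be unmapped
 *
 * Marks user queue scheduling as halted for enforce isolation and unmaps all
 * GFX and compute user queues on the given partition.
 */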
945 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
946 						  u32 idx)
947 {
948 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
949 	struct amdgpu_usermode_queue *queue;
950 	struct amdgpu_userq_mgr *uqm, *tmp;
951 	int queue_id;
952 	int ret = 0, r;
953 
954 	/* only need to stop gfx/compute */
955 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
956 		return 0;
957 
958 	mutex_lock(&adev->userq_mutex);
959 	if (adev->userq_halt_for_enforce_isolation)
960 		dev_warn(adev->dev, "userq scheduling already stopped!\n");
961 	adev->userq_halt_for_enforce_isolation = true;
962 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
963 		cancel_delayed_work_sync(&uqm->resume_work);
964 		mutex_lock(&uqm->userq_mutex);
965 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
966 			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
967 			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
968 			    (queue->xcp_id == idx)) {
969 				r = amdgpu_userq_unmap_helper(uqm, queue);
970 				if (r)
971 					ret = r;
972 			}
973 		}
974 		mutex_unlock(&uqm->userq_mutex);
975 	}
976 	mutex_unlock(&adev->userq_mutex);
977 	return ret;
978 }
979 
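/**
 * amdgpu_userq_start_sched_for_enforce_isolation - restart GFX/compute user queues
 * @adev: amdgpu device pointer
 * @idx: XCP partition whose queues should be mapped again
 *
 * Clears the halt flag set by amdgpu_userq_stop_sched_for_enforce_isolation()
 * and maps the GFX and compute user queues of the given partition again.
 */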
980 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
981 						   u32 idx)
982 {
983 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
984 	struct amdgpu_usermode_queue *queue;
985 	struct amdgpu_userq_mgr *uqm, *tmp;
986 	int queue_id;
987 	int ret = 0, r;
988 
989 	/* only need to start gfx/compute */
990 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
991 		return 0;
992 
993 	mutex_lock(&adev->userq_mutex);
994 	if (!adev->userq_halt_for_enforce_isolation)
995 		dev_warn(adev->dev, "userq scheduling already started!\n");
996 	adev->userq_halt_for_enforce_isolation = false;
997 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
998 		mutex_lock(&uqm->userq_mutex);
999 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
1000 			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1001 			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1002 			    (queue->xcp_id == idx)) {
1003 				r = amdgpu_userq_map_helper(uqm, queue);
1004 				if (r)
1005 					ret = r;
1006 			}
1007 		}
1008 		mutex_unlock(&uqm->userq_mutex);
1009 	}
1010 	mutex_unlock(&adev->userq_mutex);
1011 	return ret;
1012 }
1013