xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c (revision 18ee2b9b7bd4e2346e467101c973d62300c8ba85)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drm_auth.h>
26 #include <drm/drm_exec.h>
27 #include <linux/pm_runtime.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_userq.h"
32 #include "amdgpu_userq_fence.h"
33 
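/*
 * Build a bitmask of hardware IP types (AMDGPU_HW_IP_*) that have user
 * queue functions registered on this device; a set bit means user mode
 * queues are supported for that IP.
 */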
34 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
35 {
36 	int i;
37 	u32 userq_ip_mask = 0;
38 
39 	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
40 		if (adev->userq_funcs[i])
41 			userq_ip_mask |= (1 << i);
42 	}
43 
44 	return userq_ip_mask;
45 }
46 
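/*
 * Unmap a queue that is currently in the MAPPED state. On failure the
 * queue is marked HUNG, otherwise it transitions to UNMAPPED.
 */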
47 static int
48 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
49 			  struct amdgpu_usermode_queue *queue)
50 {
51 	struct amdgpu_device *adev = uq_mgr->adev;
52 	const struct amdgpu_userq_funcs *userq_funcs =
53 		adev->userq_funcs[queue->queue_type];
54 	int r = 0;
55 
56 	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
57 		r = userq_funcs->unmap(uq_mgr, queue);
58 		if (r)
59 			queue->state = AMDGPU_USERQ_STATE_HUNG;
60 		else
61 			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
62 	}
63 	return r;
64 }
65 
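/*
 * Map a queue that is currently in the UNMAPPED state. On failure the
 * queue is marked HUNG, otherwise it transitions to MAPPED.
 */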
66 static int
67 amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
68 			struct amdgpu_usermode_queue *queue)
69 {
70 	struct amdgpu_device *adev = uq_mgr->adev;
71 	const struct amdgpu_userq_funcs *userq_funcs =
72 		adev->userq_funcs[queue->queue_type];
73 	int r = 0;
74 
75 	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
76 		r = userq_funcs->map(uq_mgr, queue);
77 		if (r) {
78 			queue->state = AMDGPU_USERQ_STATE_HUNG;
79 		} else {
80 			queue->state = AMDGPU_USERQ_STATE_MAPPED;
81 		}
82 	}
83 	return r;
84 }
85 
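/*
 * Wait (interruptibly, up to 100 ms) for the queue's last fence to signal
 * and log an error on timeout. Called before a queue is destroyed or the
 * queue manager is torn down.
 */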
86 static void
87 amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
88 				 struct amdgpu_usermode_queue *queue)
89 {
90 	struct dma_fence *f = queue->last_fence;
91 	int ret;
92 
93 	if (f && !dma_fence_is_signaled(f)) {
94 		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
95 		if (ret <= 0)
96 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
97 				     f->context, f->seqno);
98 	}
99 }
100 
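/*
 * Tear down a queue: destroy its MQD, free the fence driver, drop it from
 * the manager's IDR and free the queue structure itself.
 */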
101 static void
102 amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
103 		     struct amdgpu_usermode_queue *queue,
104 		     int queue_id)
105 {
106 	struct amdgpu_device *adev = uq_mgr->adev;
107 	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
108 
109 	uq_funcs->mqd_destroy(uq_mgr, queue);
110 	amdgpu_userq_fence_driver_free(queue);
111 	idr_remove(&uq_mgr->userq_idr, queue_id);
112 	kfree(queue);
113 }
114 
115 int
116 amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
117 {
118 	struct amdgpu_usermode_queue *queue;
119 	int queue_id;
120 	int ret = 0;
121 
122 	mutex_lock(&uq_mgr->userq_mutex);
123 	/* Count the queues currently mapped for this process */
124 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
125 		ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
126 
127 	mutex_unlock(&uq_mgr->userq_mutex);
128 	return ret;
129 }
130 
131 static struct amdgpu_usermode_queue *
132 amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
133 {
134 	return idr_find(&uq_mgr->userq_idr, qid);
135 }
136 
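/*
 * Make sure a valid (unsignaled) eviction fence exists before queue
 * creation. Flushes any pending resume work and, if the fence is missing
 * or already signaled, schedules the resume work and retries. Returns
 * with uq_mgr->userq_mutex held.
 */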
137 void
138 amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
139 			     struct amdgpu_eviction_fence_mgr *evf_mgr)
140 {
141 	struct amdgpu_eviction_fence *ev_fence;
142 
143 retry:
144 	/* Flush any pending resume work to create ev_fence */
145 	flush_delayed_work(&uq_mgr->resume_work);
146 
147 	mutex_lock(&uq_mgr->userq_mutex);
148 	spin_lock(&evf_mgr->ev_fence_lock);
149 	ev_fence = evf_mgr->ev_fence;
150 	spin_unlock(&evf_mgr->ev_fence_lock);
151 	if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
152 		mutex_unlock(&uq_mgr->userq_mutex);
153 		/*
154 		 * Looks like there was no pending resume work,
155 		 * add one now to create a valid eviction fence
156 		 */
157 		schedule_delayed_work(&uq_mgr->resume_work, 0);
158 		goto retry;
159 	}
160 }
161 
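/*
 * Allocate a kernel-owned GTT buffer object of @size bytes for user queue
 * metadata, map it into the GART and the CPU address space, and zero it.
 * The GPU address and CPU pointer are stored in @userq_obj.
 */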
162 int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
163 			       struct amdgpu_userq_obj *userq_obj,
164 			       int size)
165 {
166 	struct amdgpu_device *adev = uq_mgr->adev;
167 	struct amdgpu_bo_param bp;
168 	int r;
169 
170 	memset(&bp, 0, sizeof(bp));
171 	bp.byte_align = PAGE_SIZE;
172 	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
173 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
174 		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
175 	bp.type = ttm_bo_type_kernel;
176 	bp.size = size;
177 	bp.resv = NULL;
178 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
179 
180 	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
181 	if (r) {
182 		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
183 		return r;
184 	}
185 
186 	r = amdgpu_bo_reserve(userq_obj->obj, true);
187 	if (r) {
188 		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
189 		goto free_obj;
190 	}
191 
192 	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
193 	if (r) {
194 		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
195 		goto unresv;
196 	}
197 
198 	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
199 	if (r) {
200 		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
201 		goto unresv;
202 	}
203 
204 	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
205 	amdgpu_bo_unreserve(userq_obj->obj);
206 	memset(userq_obj->cpu_ptr, 0, size);
207 	return 0;
208 
209 unresv:
210 	amdgpu_bo_unreserve(userq_obj->obj);
211 
212 free_obj:
213 	amdgpu_bo_unref(&userq_obj->obj);
214 	return r;
215 }
216 
217 void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
218 				 struct amdgpu_userq_obj *userq_obj)
219 {
220 	amdgpu_bo_kunmap(userq_obj->obj);
221 	amdgpu_bo_unref(&userq_obj->obj);
222 }
223 
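/*
 * Look up the doorbell BO from the GEM handle, pin it in the doorbell
 * domain and convert the queue's relative doorbell offset into an
 * absolute doorbell index on the BAR. The BO stays pinned until the queue
 * is destroyed. Returns the index on success or a negative error code.
 */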
224 uint64_t
225 amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
226 				struct amdgpu_db_info *db_info,
227 				struct drm_file *filp)
228 {
229 	uint64_t index;
230 	struct drm_gem_object *gobj;
231 	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
232 	int r, db_size;
233 
234 	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
235 	if (gobj == NULL) {
236 		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
237 		return -EINVAL;
238 	}
239 
240 	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
241 	drm_gem_object_put(gobj);
242 
243 	r = amdgpu_bo_reserve(db_obj->obj, true);
244 	if (r) {
245 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
246 		goto unref_bo;
247 	}
248 
249 	/* Pin the BO before generating the index, unpin in queue destroy */
250 	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
251 	if (r) {
252 		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
253 		goto unresv_bo;
254 	}
255 
256 	switch (db_info->queue_type) {
257 	case AMDGPU_HW_IP_GFX:
258 	case AMDGPU_HW_IP_COMPUTE:
259 	case AMDGPU_HW_IP_DMA:
260 		db_size = sizeof(u64);
261 		break;
262 
263 	case AMDGPU_HW_IP_VCN_ENC:
264 		db_size = sizeof(u32);
265 		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
266 		break;
267 
268 	case AMDGPU_HW_IP_VPE:
269 		db_size = sizeof(u32);
270 		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
271 		break;
272 
273 	default:
274 		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
275 			     db_info->queue_type);
276 		r = -EINVAL;
277 		goto unpin_bo;
278 	}
279 
280 	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
281 					     db_info->doorbell_offset, db_size);
282 	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
283 		       "[Usermode queues] doorbell index=%lld\n", index);
284 	amdgpu_bo_unreserve(db_obj->obj);
285 	return index;
286 
287 unpin_bo:
288 	amdgpu_bo_unpin(db_obj->obj);
289 unresv_bo:
290 	amdgpu_bo_unreserve(db_obj->obj);
291 unref_bo:
292 	amdgpu_bo_unref(&db_obj->obj);
293 	return r;
294 }
295 
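/*
 * Destroy a user queue: wait for its last fence, unpin and release the
 * doorbell BO, remove its debugfs entry, unmap it from the hardware and
 * free all associated resources.
 */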
296 static int
297 amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
298 {
299 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
300 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
301 	struct amdgpu_device *adev = uq_mgr->adev;
302 	struct amdgpu_usermode_queue *queue;
303 	int r = 0;
304 
305 	cancel_delayed_work_sync(&uq_mgr->resume_work);
306 	mutex_lock(&uq_mgr->userq_mutex);
307 
308 	queue = amdgpu_userq_find(uq_mgr, queue_id);
309 	if (!queue) {
310 		drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
311 		mutex_unlock(&uq_mgr->userq_mutex);
312 		return -EINVAL;
313 	}
314 	amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
315 	r = amdgpu_bo_reserve(queue->db_obj.obj, true);
316 	if (!r) {
317 		amdgpu_bo_unpin(queue->db_obj.obj);
318 		amdgpu_bo_unreserve(queue->db_obj.obj);
319 	}
320 	amdgpu_bo_unref(&queue->db_obj.obj);
321 
322 #if defined(CONFIG_DEBUG_FS)
323 	debugfs_remove_recursive(queue->debugfs_queue);
324 #endif
325 	r = amdgpu_userq_unmap_helper(uq_mgr, queue);
326 	amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
327 	mutex_unlock(&uq_mgr->userq_mutex);
328 
329 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
330 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
331 
332 	return r;
333 }
334 
335 static int amdgpu_userq_priority_permit(struct drm_file *filp,
336 					int priority)
337 {
338 	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
339 		return 0;
340 
341 	if (capable(CAP_SYS_NICE))
342 		return 0;
343 
344 	if (drm_is_current_master(filp))
345 		return 0;
346 
347 	return -EACCES;
348 }
349 
350 #if defined(CONFIG_DEBUG_FS)
351 static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
352 {
353 	struct amdgpu_usermode_queue *queue = m->private;
354 	struct amdgpu_bo *bo;
355 	int r;
356 
357 	if (!queue || !queue->mqd.obj)
358 		return -EINVAL;
359 
360 	bo = amdgpu_bo_ref(queue->mqd.obj);
361 	r = amdgpu_bo_reserve(bo, true);
362 	if (r) {
363 		amdgpu_bo_unref(&bo);
364 		return -EINVAL;
365 	}
366 
367 	seq_printf(m, "queue_type %d\n", queue->queue_type);
368 	seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
369 
370 	amdgpu_bo_unreserve(bo);
371 	amdgpu_bo_unref(&bo);
372 
373 	return 0;
374 }
375 
376 static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
377 {
378 	return single_open(file, amdgpu_mqd_info_read, inode->i_private);
379 }
380 
381 static const struct file_operations amdgpu_mqd_info_fops = {
382 	.owner = THIS_MODULE,
383 	.open = amdgpu_mqd_info_open,
384 	.read = seq_read,
385 	.llseek = seq_lseek,
386 	.release = single_release,
387 };
388 #endif
389 
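/*
 * Create a user mode queue for the requested IP type: validate the flags
 * and priority, resolve the doorbell index, allocate the fence driver and
 * MQD, register the queue in the manager's IDR and, unless scheduling is
 * halted for enforced isolation, map it to the hardware.
 */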
390 static int
391 amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
392 {
393 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
394 	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
395 	struct amdgpu_device *adev = uq_mgr->adev;
396 	const struct amdgpu_userq_funcs *uq_funcs;
397 	struct amdgpu_usermode_queue *queue;
398 	struct amdgpu_db_info db_info;
399 	char *queue_name;
400 	bool skip_map_queue;
401 	uint64_t index;
402 	int qid, r = 0;
403 	int priority =
404 		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
405 		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
406 
407 	/* Usermode queues are only supported for GFX, compute and SDMA IPs for now */
408 	if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
409 	    args->in.ip_type != AMDGPU_HW_IP_DMA &&
410 	    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
411 		drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
412 			     args->in.ip_type);
413 		return -EINVAL;
414 	}
415 
416 	r = amdgpu_userq_priority_permit(filp, priority);
417 	if (r)
418 		return r;
419 
420 	if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
421 	    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
422 	    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
423 	    !amdgpu_is_tmz(adev)) {
424 		drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
425 		return -EINVAL;
426 	}
427 
428 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
429 	if (r < 0) {
430 		drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
431 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
432 		return r;
433 	}
434 
435 	/*
436 	 * There could be a situation where we are creating a new queue while
437 	 * the other queues under this UQ_mgr are suspended. So if there is any
438 	 * resume work pending, wait for it to get done.
439 	 *
440 	 * This will also make sure we have a valid eviction fence ready to be used.
441 	 */
442 	mutex_lock(&adev->userq_mutex);
443 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
444 
445 	uq_funcs = adev->userq_funcs[args->in.ip_type];
446 	if (!uq_funcs) {
447 		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
448 			     args->in.ip_type);
449 		r = -EINVAL;
450 		goto unlock;
451 	}
452 
453 	queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
454 	if (!queue) {
455 		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
456 		r = -ENOMEM;
457 		goto unlock;
458 	}
459 	queue->doorbell_handle = args->in.doorbell_handle;
460 	queue->queue_type = args->in.ip_type;
461 	queue->vm = &fpriv->vm;
462 	queue->priority = priority;
463 
464 	db_info.queue_type = queue->queue_type;
465 	db_info.doorbell_handle = queue->doorbell_handle;
466 	db_info.db_obj = &queue->db_obj;
467 	db_info.doorbell_offset = args->in.doorbell_offset;
468 
469 	/* Convert relative doorbell offset into absolute doorbell index */
470 	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
471 	if (index == (uint64_t)-EINVAL) {
472 		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
473 		kfree(queue);
474 		r = -EINVAL;
475 		goto unlock;
476 	}
477 
478 	queue->doorbell_index = index;
479 	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
480 	r = amdgpu_userq_fence_driver_alloc(adev, queue);
481 	if (r) {
482 		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
483 		goto unlock;
484 	}
485 
486 	r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
487 	if (r) {
488 		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
489 		amdgpu_userq_fence_driver_free(queue);
490 		kfree(queue);
491 		goto unlock;
492 	}
493 
494 
495 	qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
496 	if (qid < 0) {
497 		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
498 		amdgpu_userq_fence_driver_free(queue);
499 		uq_funcs->mqd_destroy(uq_mgr, queue);
500 		kfree(queue);
501 		r = -ENOMEM;
502 		goto unlock;
503 	}
504 
505 	/* don't map the queue if scheduling is halted */
506 	if (adev->userq_halt_for_enforce_isolation &&
507 	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
508 	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
509 		skip_map_queue = true;
510 	else
511 		skip_map_queue = false;
512 	if (!skip_map_queue) {
513 		r = amdgpu_userq_map_helper(uq_mgr, queue);
514 		if (r) {
515 			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
516 			idr_remove(&uq_mgr->userq_idr, qid);
517 			amdgpu_userq_fence_driver_free(queue);
518 			uq_funcs->mqd_destroy(uq_mgr, queue);
519 			kfree(queue);
520 			goto unlock;
521 		}
522 	}
523 
524 	queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
525 	if (!queue_name) {
526 		r = -ENOMEM;
527 		goto unlock;
528 	}
529 
530 #if defined(CONFIG_DEBUG_FS)
531 	/* Queue dentry per client to hold MQD information */
532 	queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
533 	debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
534 #endif
535 	kfree(queue_name);
536 
537 	args->out.queue_id = qid;
538 
539 unlock:
540 	mutex_unlock(&uq_mgr->userq_mutex);
541 	mutex_unlock(&adev->userq_mutex);
542 
543 	return r;
544 }
545 
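/*
 * DRM_AMDGPU_USERQ ioctl entry point. AMDGPU_USERQ_OP_CREATE creates a
 * queue from the parameters in args->in and returns its id in
 * args->out.queue_id; AMDGPU_USERQ_OP_FREE destroys the queue identified
 * by args->in.queue_id and requires all other input fields to be zero.
 *
 * Rough userspace sketch (assumes the DRM_IOCTL_AMDGPU_USERQ wrapper and
 * field names from the uapi header; mqd_props is a placeholder for the
 * IP-specific MQD properties structure):
 *
 *	union drm_amdgpu_userq req = {0};
 *	req.in.op = AMDGPU_USERQ_OP_CREATE;
 *	req.in.ip_type = AMDGPU_HW_IP_GFX;
 *	req.in.doorbell_handle = db_handle;	// GEM handle of the doorbell BO
 *	req.in.queue_va = queue_gpu_va;
 *	req.in.queue_size = queue_size;
 *	req.in.rptr_va = rptr_gpu_va;
 *	req.in.wptr_va = wptr_gpu_va;
 *	req.in.mqd = (uintptr_t)&mqd_props;
 *	req.in.mqd_size = sizeof(mqd_props);
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ, &req);
 */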
546 int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
547 		       struct drm_file *filp)
548 {
549 	union drm_amdgpu_userq *args = data;
550 	int r;
551 
552 	switch (args->in.op) {
553 	case AMDGPU_USERQ_OP_CREATE:
554 		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
555 				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
556 			return -EINVAL;
557 		r = amdgpu_userq_create(filp, args);
558 		if (r)
559 			drm_file_err(filp, "Failed to create usermode queue\n");
560 		break;
561 
562 	case AMDGPU_USERQ_OP_FREE:
563 		if (args->in.ip_type ||
564 		    args->in.doorbell_handle ||
565 		    args->in.doorbell_offset ||
566 		    args->in.flags ||
567 		    args->in.queue_va ||
568 		    args->in.queue_size ||
569 		    args->in.rptr_va ||
570 		    args->in.wptr_va ||
572 		    args->in.mqd ||
573 		    args->in.mqd_size)
574 			return -EINVAL;
575 		r = amdgpu_userq_destroy(filp, args->in.queue_id);
576 		if (r)
577 			drm_file_err(filp, "Failed to destroy usermode queue\n");
578 		break;
579 
580 	default:
581 		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
582 		return -EINVAL;
583 	}
584 
585 	return r;
586 }
587 
588 static int
589 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
590 {
591 	struct amdgpu_usermode_queue *queue;
592 	int queue_id;
593 	int ret = 0, r;
594 
595 	/* Resume all the queues for this process */
596 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
597 		r = amdgpu_userq_map_helper(uq_mgr, queue);
598 		if (r)
599 			ret = r;
600 	}
601 
602 	if (ret)
603 		drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
604 	return ret;
605 }
606 
607 static int
608 amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
609 {
610 	struct ttm_operation_ctx ctx = { false, false };
611 	int ret;
612 
613 	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
614 
615 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
616 	if (ret)
617 		DRM_ERROR("Failed to validate BO\n");
618 
619 	return ret;
620 }
621 
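/*
 * Revalidate this process' VM before restoring its queues: lock the page
 * directory and the BOs on the done list, flush updates for moved and
 * invalidated per-VM BOs into the page tables, then replace the eviction
 * fence.
 */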
622 static int
623 amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
624 {
625 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
626 	struct amdgpu_vm *vm = &fpriv->vm;
627 	struct amdgpu_device *adev = uq_mgr->adev;
628 	struct amdgpu_bo_va *bo_va;
629 	struct ww_acquire_ctx *ticket;
630 	struct drm_exec exec;
631 	struct amdgpu_bo *bo;
632 	struct dma_resv *resv;
633 	bool clear, unlock;
634 	int ret = 0;
635 
636 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
637 	drm_exec_until_all_locked(&exec) {
638 		ret = amdgpu_vm_lock_pd(vm, &exec, 2);
639 		drm_exec_retry_on_contention(&exec);
640 		if (unlikely(ret)) {
641 			drm_file_err(uq_mgr->file, "Failed to lock PD\n");
642 			goto unlock_all;
643 		}
644 
645 		/* Lock the done list */
646 		list_for_each_entry(bo_va, &vm->done, base.vm_status) {
647 			bo = bo_va->base.bo;
648 			if (!bo)
649 				continue;
650 
651 			ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
652 			drm_exec_retry_on_contention(&exec);
653 			if (unlikely(ret))
654 				goto unlock_all;
655 		}
656 	}
657 
658 	spin_lock(&vm->status_lock);
659 	while (!list_empty(&vm->moved)) {
660 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
661 					 base.vm_status);
662 		spin_unlock(&vm->status_lock);
663 
664 		/* Per VM BOs never need to be cleared in the page tables */
665 		ret = amdgpu_vm_bo_update(adev, bo_va, false);
666 		if (ret)
667 			goto unlock_all;
668 		spin_lock(&vm->status_lock);
669 	}
670 
671 	ticket = &exec.ticket;
672 	while (!list_empty(&vm->invalidated)) {
673 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
674 					 base.vm_status);
675 		resv = bo_va->base.bo->tbo.base.resv;
676 		spin_unlock(&vm->status_lock);
677 
678 		bo = bo_va->base.bo;
679 		ret = amdgpu_userq_validate_vm_bo(NULL, bo);
680 		if (ret) {
681 			drm_file_err(uq_mgr->file, "Failed to validate BO\n");
682 			goto unlock_all;
683 		}
684 
685 		/* Try to reserve the BO to avoid clearing its ptes */
686 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
687 			clear = false;
688 			unlock = true;
689 		/* The caller is already holding the reservation lock */
690 		} else if (dma_resv_locking_ctx(resv) == ticket) {
691 			clear = false;
692 			unlock = false;
693 		/* Somebody else is using the BO right now */
694 		} else {
695 			clear = true;
696 			unlock = false;
697 		}
698 
699 		ret = amdgpu_vm_bo_update(adev, bo_va, clear);
700 
701 		if (unlock)
702 			dma_resv_unlock(resv);
703 		if (ret)
704 			goto unlock_all;
705 
706 		spin_lock(&vm->status_lock);
707 	}
708 	spin_unlock(&vm->status_lock);
709 
710 	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
711 	if (ret)
712 		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
713 
714 unlock_all:
715 	drm_exec_fini(&exec);
716 	return ret;
717 }
718 
719 static void amdgpu_userq_restore_worker(struct work_struct *work)
720 {
721 	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
722 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
723 	int ret;
724 
725 	flush_delayed_work(&fpriv->evf_mgr.suspend_work);
726 
727 	mutex_lock(&uq_mgr->userq_mutex);
728 
729 	ret = amdgpu_userq_validate_bos(uq_mgr);
730 	if (ret) {
731 		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
732 		goto unlock;
733 	}
734 
735 	ret = amdgpu_userq_restore_all(uq_mgr);
736 	if (ret) {
737 		drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
738 		goto unlock;
739 	}
740 
741 unlock:
742 	mutex_unlock(&uq_mgr->userq_mutex);
743 }
744 
745 static int
746 amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
747 {
748 	struct amdgpu_usermode_queue *queue;
749 	int queue_id;
750 	int ret = 0, r;
751 
752 	/* Try to unmap all the queues in this process ctx */
753 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
754 		r = amdgpu_userq_unmap_helper(uq_mgr, queue);
755 		if (r)
756 			ret = r;
757 	}
758 
759 	if (ret)
760 		drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
761 	return ret;
762 }
763 
764 static int
765 amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
766 {
767 	struct amdgpu_usermode_queue *queue;
768 	int queue_id, ret;
769 
770 	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
771 		struct dma_fence *f = queue->last_fence;
772 
773 		if (!f || dma_fence_is_signaled(f))
774 			continue;
775 		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
776 		if (ret <= 0) {
777 			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
778 				     f->context, f->seqno);
779 			return -ETIMEDOUT;
780 		}
781 	}
782 
783 	return 0;
784 }
785 
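/*
 * Eviction path: wait for outstanding user queue fences, unmap every
 * queue owned by this manager and signal the current eviction fence.
 * Unless the file descriptor is being closed, resume work is scheduled so
 * the queues get mapped again later.
 */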
786 void
787 amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
788 		   struct amdgpu_eviction_fence *ev_fence)
789 {
790 	int ret;
791 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
792 	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
793 
794 	/* Wait for any pending userqueue fence work to finish */
795 	ret = amdgpu_userq_wait_for_signal(uq_mgr);
796 	if (ret) {
797 		drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
798 		return;
799 	}
800 
801 	ret = amdgpu_userq_evict_all(uq_mgr);
802 	if (ret) {
803 		drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
804 		return;
805 	}
806 
807 	/* Signal current eviction fence */
808 	amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
809 
810 	if (evf_mgr->fd_closing) {
811 		cancel_delayed_work_sync(&uq_mgr->resume_work);
812 		return;
813 	}
814 
815 	/* Schedule a resume work */
816 	schedule_delayed_work(&uq_mgr->resume_work, 0);
817 }
818 
819 int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
820 			  struct amdgpu_device *adev)
821 {
822 	mutex_init(&userq_mgr->userq_mutex);
823 	idr_init_base(&userq_mgr->userq_idr, 1);
824 	userq_mgr->adev = adev;
825 	userq_mgr->file = file_priv;
826 
827 	mutex_lock(&adev->userq_mutex);
828 	list_add(&userq_mgr->list, &adev->userq_mgr_list);
829 	mutex_unlock(&adev->userq_mutex);
830 
831 	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
832 	return 0;
833 }
834 
835 void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
836 {
837 	struct amdgpu_device *adev = userq_mgr->adev;
838 	struct amdgpu_usermode_queue *queue;
839 	struct amdgpu_userq_mgr *uqm, *tmp;
840 	uint32_t queue_id;
841 
842 	cancel_delayed_work_sync(&userq_mgr->resume_work);
843 
844 	mutex_lock(&adev->userq_mutex);
845 	mutex_lock(&userq_mgr->userq_mutex);
846 	idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
847 		amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
848 		amdgpu_userq_unmap_helper(userq_mgr, queue);
849 		amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
850 	}
851 
852 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
853 		if (uqm == userq_mgr) {
854 			list_del(&uqm->list);
855 			break;
856 		}
857 	}
858 	idr_destroy(&userq_mgr->userq_idr);
859 	mutex_unlock(&userq_mgr->userq_mutex);
860 	mutex_unlock(&adev->userq_mutex);
861 	mutex_destroy(&userq_mgr->userq_mutex);
862 }
863 
864 int amdgpu_userq_suspend(struct amdgpu_device *adev)
865 {
866 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
867 	struct amdgpu_usermode_queue *queue;
868 	struct amdgpu_userq_mgr *uqm, *tmp;
869 	int queue_id;
870 	int ret = 0, r;
871 
872 	if (!ip_mask)
873 		return 0;
874 
875 	mutex_lock(&adev->userq_mutex);
876 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
877 		cancel_delayed_work_sync(&uqm->resume_work);
878 		mutex_lock(&uqm->userq_mutex);
879 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
880 			r = amdgpu_userq_unmap_helper(uqm, queue);
881 			if (r)
882 				ret = r;
883 		}
884 		mutex_unlock(&uqm->userq_mutex);
885 	}
886 	mutex_unlock(&adev->userq_mutex);
887 	return ret;
888 }
889 
890 int amdgpu_userq_resume(struct amdgpu_device *adev)
891 {
892 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
893 	struct amdgpu_usermode_queue *queue;
894 	struct amdgpu_userq_mgr *uqm, *tmp;
895 	int queue_id;
896 	int ret = 0, r;
897 
898 	if (!ip_mask)
899 		return 0;
900 
901 	mutex_lock(&adev->userq_mutex);
902 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
903 		mutex_lock(&uqm->userq_mutex);
904 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
905 			r = amdgpu_userq_map_helper(uqm, queue);
906 			if (r)
907 				ret = r;
908 		}
909 		mutex_unlock(&uqm->userq_mutex);
910 	}
911 	mutex_unlock(&adev->userq_mutex);
912 	return ret;
913 }
914 
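/*
 * Halt user queue scheduling for enforced isolation: unmap all GFX and
 * compute user queues on the given XCP partition and flag the device so
 * newly created queues are not mapped until scheduling is started again.
 */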
915 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
916 						  u32 idx)
917 {
918 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
919 	struct amdgpu_usermode_queue *queue;
920 	struct amdgpu_userq_mgr *uqm, *tmp;
921 	int queue_id;
922 	int ret = 0, r;
923 
924 	/* only need to stop gfx/compute */
925 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
926 		return 0;
927 
928 	mutex_lock(&adev->userq_mutex);
929 	if (adev->userq_halt_for_enforce_isolation)
930 		dev_warn(adev->dev, "userq scheduling already stopped!\n");
931 	adev->userq_halt_for_enforce_isolation = true;
932 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
933 		cancel_delayed_work_sync(&uqm->resume_work);
934 		mutex_lock(&uqm->userq_mutex);
935 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
936 			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
937 			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
938 			    (queue->xcp_id == idx)) {
939 				r = amdgpu_userq_unmap_helper(uqm, queue);
940 				if (r)
941 					ret = r;
942 			}
943 		}
944 		mutex_unlock(&uqm->userq_mutex);
945 	}
946 	mutex_unlock(&adev->userq_mutex);
947 	return ret;
948 }
949 
950 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
951 						   u32 idx)
952 {
953 	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
954 	struct amdgpu_usermode_queue *queue;
955 	struct amdgpu_userq_mgr *uqm, *tmp;
956 	int queue_id;
957 	int ret = 0, r;
958 
959 	/* only need to start gfx/compute */
960 	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
961 		return 0;
962 
963 	mutex_lock(&adev->userq_mutex);
964 	if (!adev->userq_halt_for_enforce_isolation)
965 		dev_warn(adev->dev, "userq scheduling already started!\n");
966 	adev->userq_halt_for_enforce_isolation = false;
967 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
968 		mutex_lock(&uqm->userq_mutex);
969 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
970 			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
971 			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
972 			    (queue->xcp_id == idx)) {
973 				r = amdgpu_userq_map_helper(uqm, queue);
974 				if (r)
975 					ret = r;
976 			}
977 		}
978 		mutex_unlock(&uqm->userq_mutex);
979 	}
980 	mutex_unlock(&adev->userq_mutex);
981 	return ret;
982 }
983