1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright 2023 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25 #include <drm/drm_auth.h>
26 #include <drm/drm_exec.h>
27 #include <linux/pm_runtime.h>
28 #include <drm/drm_drv.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_reset.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_userq.h"
34 #include "amdgpu_hmm.h"
35 #include "amdgpu_userq_fence.h"
36
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
38 {
39 int i;
40 u32 userq_ip_mask = 0;
41
42 for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
43 if (adev->userq_funcs[i])
44 userq_ip_mask |= (1 << i);
45 }
46
47 return userq_ip_mask;
48 }
49
static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
51 enum amdgpu_ring_type ring_type, int reset_type)
52 {
53
54 if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
55 return false;
56
57 switch (ring_type) {
58 case AMDGPU_RING_TYPE_GFX:
59 if (adev->gfx.gfx_supported_reset & reset_type)
60 return true;
61 break;
62 case AMDGPU_RING_TYPE_COMPUTE:
63 if (adev->gfx.compute_supported_reset & reset_type)
64 return true;
65 break;
66 case AMDGPU_RING_TYPE_SDMA:
67 if (adev->sdma.supported_reset & reset_type)
68 return true;
69 break;
70 case AMDGPU_RING_TYPE_VCN_DEC:
71 case AMDGPU_RING_TYPE_VCN_ENC:
72 if (adev->vcn.supported_reset & reset_type)
73 return true;
74 break;
75 case AMDGPU_RING_TYPE_VCN_JPEG:
76 if (adev->jpeg.supported_reset & reset_type)
77 return true;
78 break;
79 default:
80 break;
81 }
82 return false;
83 }
84
static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
86 {
87 if (amdgpu_device_should_recover_gpu(adev)) {
88 amdgpu_reset_domain_schedule(adev->reset_domain,
89 &adev->userq_reset_work);
90 /* Wait for the reset job to complete */
91 flush_work(&adev->userq_reset_work);
92 }
93 }
94
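/*
 * Walk the compute, gfx and SDMA user queues of this process and let the
 * per-IP backend detect and reset any hung queue. If a per-queue reset
 * fails, fall back to scheduling a full GPU reset.
 */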
95 static int
amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
97 {
98 struct amdgpu_device *adev = uq_mgr->adev;
99 const int queue_types[] = {
100 AMDGPU_RING_TYPE_COMPUTE,
101 AMDGPU_RING_TYPE_GFX,
102 AMDGPU_RING_TYPE_SDMA
103 };
104 const int num_queue_types = ARRAY_SIZE(queue_types);
105 bool gpu_reset = false;
106 int r = 0;
107 int i;
108
/* Warn if the per-process userq mutex is not held */
110 WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));
111
112 if (unlikely(adev->debug_disable_gpu_ring_reset)) {
113 dev_err(adev->dev, "userq reset disabled by debug mask\n");
114 return 0;
115 }
116
117 /*
118 * If GPU recovery feature is disabled system-wide,
119 * skip all reset detection logic
120 */
121 if (!amdgpu_gpu_recovery)
122 return 0;
123
124 /*
125 * Iterate through all queue types to detect and reset problematic queues
126 * Process each queue type in the defined order
127 */
128 for (i = 0; i < num_queue_types; i++) {
129 int ring_type = queue_types[i];
130 const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];
131
132 if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
133 continue;
134
135 if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
136 funcs && funcs->detect_and_reset) {
137 r = funcs->detect_and_reset(adev, ring_type);
138 if (r) {
139 gpu_reset = true;
140 break;
141 }
142 }
143 }
144
145 if (gpu_reset)
146 amdgpu_userq_gpu_reset(adev);
147
148 return r;
149 }
150
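/*
 * Delayed work fired after the queue timeout. If the fence armed by
 * amdgpu_userq_start_hang_detect_work() is still unsignaled, run hang
 * detection and reset under the queue manager mutex.
 */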
static void amdgpu_userq_hang_detect_work(struct work_struct *work)
152 {
153 struct amdgpu_usermode_queue *queue = container_of(work,
154 struct amdgpu_usermode_queue,
155 hang_detect_work.work);
156 struct dma_fence *fence;
157 struct amdgpu_userq_mgr *uq_mgr;
158
159 if (!queue->userq_mgr)
160 return;
161
162 uq_mgr = queue->userq_mgr;
163 fence = READ_ONCE(queue->hang_detect_fence);
164 /* Fence already signaled – no action needed */
165 if (!fence || dma_fence_is_signaled(fence))
166 return;
167
168 mutex_lock(&uq_mgr->userq_mutex);
169 amdgpu_userq_detect_and_reset_queues(uq_mgr);
170 mutex_unlock(&uq_mgr->userq_mutex);
171 }
172
173 /*
174 * Start hang detection for a user queue fence. A delayed work will be scheduled
175 * to check if the fence is still pending after the timeout period.
176 */
void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue)
178 {
179 struct amdgpu_device *adev;
180 unsigned long timeout_ms;
181
182 if (!queue || !queue->userq_mgr || !queue->userq_mgr->adev)
183 return;
184
185 adev = queue->userq_mgr->adev;
186 /* Determine timeout based on queue type */
187 switch (queue->queue_type) {
188 case AMDGPU_RING_TYPE_GFX:
189 timeout_ms = adev->gfx_timeout;
190 break;
191 case AMDGPU_RING_TYPE_COMPUTE:
192 timeout_ms = adev->compute_timeout;
193 break;
194 case AMDGPU_RING_TYPE_SDMA:
195 timeout_ms = adev->sdma_timeout;
196 break;
197 default:
198 timeout_ms = adev->gfx_timeout;
199 break;
200 }
201
202 /* Store the fence to monitor and schedule hang detection */
203 WRITE_ONCE(queue->hang_detect_fence, queue->last_fence);
204 schedule_delayed_work(&queue->hang_detect_work,
205 msecs_to_jiffies(timeout_ms));
206 }
207
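/*
 * Fence interrupt helper: look up the user queue by its doorbell index and
 * process its fence driver while holding the doorbell xarray lock.
 */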
void amdgpu_userq_process_fence_irq(struct amdgpu_device *adev, u32 doorbell)
209 {
210 struct xarray *xa = &adev->userq_doorbell_xa;
211 struct amdgpu_usermode_queue *queue;
212 unsigned long flags;
213
214 xa_lock_irqsave(xa, flags);
215 queue = xa_load(xa, doorbell);
216 if (queue)
217 amdgpu_userq_fence_driver_process(queue->fence_drv);
218 xa_unlock_irqrestore(xa, flags);
219 }
220
static void amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue *queue)
222 {
223 INIT_DELAYED_WORK(&queue->hang_detect_work, amdgpu_userq_hang_detect_work);
224 queue->hang_detect_fence = NULL;
225 }
226
static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
228 struct amdgpu_bo_va_mapping *va_map, u64 addr)
229 {
230 struct amdgpu_userq_va_cursor *va_cursor;
231 struct userq_va_list;
232
233 va_cursor = kzalloc_obj(*va_cursor);
234 if (!va_cursor)
235 return -ENOMEM;
236
237 INIT_LIST_HEAD(&va_cursor->list);
238 va_cursor->gpu_addr = addr;
239 atomic_set(&va_map->bo_va->userq_va_mapped, 1);
240 list_add(&va_cursor->list, &queue->userq_va_list);
241
242 return 0;
243 }
244
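/*
 * Check that a user-supplied GPU VA of at least expected_size is backed by
 * an existing VM mapping and, if so, record it on the queue's VA list.
 * The caller must hold the root PD reservation.
 */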
int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
246 struct amdgpu_usermode_queue *queue,
247 u64 addr, u64 expected_size)
248 {
249 struct amdgpu_bo_va_mapping *va_map;
250 struct amdgpu_vm *vm = queue->vm;
251 u64 user_addr;
252 u64 size;
253 int r = 0;
254
255 /* Caller must hold vm->root.bo reservation */
256 dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
257
258 user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
259 size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
260
261 va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
262 if (!va_map) {
263 r = -EINVAL;
264 goto out_err;
265 }
/* Only validate that the userq range is fully contained in the VM mapping */
267 if (user_addr >= va_map->start &&
268 va_map->last - user_addr + 1 >= size) {
269 amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
270 return 0;
271 }
272
273 r = -EINVAL;
274 out_err:
275 return r;
276 }
277
static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
279 {
280 struct amdgpu_bo_va_mapping *mapping;
281 bool r;
282
283 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
284
285 mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
286 if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
287 r = true;
288 else
289 r = false;
290
291 return r;
292 }
293
static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
295 {
296 struct amdgpu_userq_va_cursor *va_cursor, *tmp;
297 int r = 0;
298
299 list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
300 r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
301 dev_dbg(queue->userq_mgr->adev->dev,
302 "validate the userq mapping:%p va:%llx r:%d\n",
303 queue, va_cursor->gpu_addr, r);
304 }
305
306 if (r != 0)
307 return true;
308
309 return false;
310 }
311
static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
313 struct amdgpu_userq_va_cursor *va_cursor)
314 {
315 atomic_set(&mapping->bo_va->userq_va_mapped, 0);
316 list_del(&va_cursor->list);
317 kfree(va_cursor);
318 }
319
static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
321 struct amdgpu_usermode_queue *queue)
322 {
323 struct amdgpu_userq_va_cursor *va_cursor, *tmp;
324 struct amdgpu_bo_va_mapping *mapping;
325
326 /* Caller must hold vm->root.bo reservation */
327 dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
328
329 list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
330 mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
331 if (!mapping) {
332 return -EINVAL;
333 }
334 dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
335 queue, va_cursor->gpu_addr);
336 amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
337 }
338
339 return 0;
340 }
341
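/*
 * The helpers below drive the queue state machine: MAPPED <-> PREEMPTED via
 * preempt/restore, and MAPPED/PREEMPTED <-> UNMAPPED via unmap/map. A failed
 * transition marks the queue AMDGPU_USERQ_STATE_HUNG and, in most paths,
 * triggers hang detection on the remaining queues of the process.
 */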
static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
343 {
344 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
345 struct amdgpu_device *adev = uq_mgr->adev;
346 const struct amdgpu_userq_funcs *userq_funcs =
347 adev->userq_funcs[queue->queue_type];
348 bool found_hung_queue = false;
349 int r = 0;
350
351 if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
352 r = userq_funcs->preempt(queue);
353 if (r) {
354 queue->state = AMDGPU_USERQ_STATE_HUNG;
355 found_hung_queue = true;
356 } else {
357 queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
358 }
359 }
360
361 if (found_hung_queue)
362 amdgpu_userq_detect_and_reset_queues(uq_mgr);
363
364 return r;
365 }
366
static int amdgpu_userq_restore_helper(struct amdgpu_usermode_queue *queue)
368 {
369 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
370 struct amdgpu_device *adev = uq_mgr->adev;
371 const struct amdgpu_userq_funcs *userq_funcs =
372 adev->userq_funcs[queue->queue_type];
373 int r = 0;
374
375 if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
376 r = userq_funcs->restore(queue);
377 if (r) {
378 queue->state = AMDGPU_USERQ_STATE_HUNG;
379 } else {
380 queue->state = AMDGPU_USERQ_STATE_MAPPED;
381 }
382 }
383
384 return r;
385 }
386
static int amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue *queue)
388 {
389 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
390 struct amdgpu_device *adev = uq_mgr->adev;
391 const struct amdgpu_userq_funcs *userq_funcs =
392 adev->userq_funcs[queue->queue_type];
393 bool found_hung_queue = false;
394 int r = 0;
395
396 if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
397 (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
398 r = userq_funcs->unmap(queue);
399 if (r) {
400 queue->state = AMDGPU_USERQ_STATE_HUNG;
401 found_hung_queue = true;
402 } else {
403 queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
404 }
405 }
406
407 if (found_hung_queue)
408 amdgpu_userq_detect_and_reset_queues(uq_mgr);
409
410 return r;
411 }
412
static int amdgpu_userq_map_helper(struct amdgpu_usermode_queue *queue)
414 {
415 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
416 struct amdgpu_device *adev = uq_mgr->adev;
417 const struct amdgpu_userq_funcs *userq_funcs =
418 adev->userq_funcs[queue->queue_type];
419 int r = 0;
420
421 if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
422 r = userq_funcs->map(queue);
423 if (r) {
424 queue->state = AMDGPU_USERQ_STATE_HUNG;
425 amdgpu_userq_detect_and_reset_queues(uq_mgr);
426 } else {
427 queue->state = AMDGPU_USERQ_STATE_MAPPED;
428 }
429 }
430
431 return r;
432 }
433
static void amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue *queue)
435 {
436 struct dma_fence *f = queue->last_fence;
437
438 if (!f)
439 return;
440
441 dma_fence_wait(f, false);
442 }
443
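/*
 * Final queue teardown: destroy the MQD, drop the doorbell lookup entry and
 * release the fence driver. Runs with the reset domain semaphore held for
 * read so it cannot race with an in-flight mode-1 reset.
 */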
static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue)
445 {
446 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
447 struct amdgpu_device *adev = uq_mgr->adev;
448 const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
449
450 /* Wait for mode-1 reset to complete */
451 down_read(&adev->reset_domain->sem);
452
453 uq_funcs->mqd_destroy(queue);
454 /* Use interrupt-safe locking since IRQ handlers may access these XArrays */
455 xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
456 amdgpu_userq_fence_driver_free(queue);
457 queue->fence_drv = NULL;
458 queue->userq_mgr = NULL;
459 list_del(&queue->userq_va_list);
460
461 up_read(&adev->reset_domain->sem);
462 }
463
464 /**
465 * amdgpu_userq_ensure_ev_fence - ensure a valid, unsignaled eviction fence exists
466 * @uq_mgr: the usermode queue manager for this process
467 * @evf_mgr: the eviction fence manager to check and rearm
468 *
469 * Ensures that a valid and not yet signaled eviction fence is attached to the
470 * usermode queue before any queue operations proceed. If it is signalled, then
471 * rearm a new eviction fence.
472 */
473 void
amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
475 struct amdgpu_eviction_fence_mgr *evf_mgr)
476 {
477 struct dma_fence *ev_fence;
478
479 retry:
480 /* Flush any pending resume work to create ev_fence */
481 flush_delayed_work(&uq_mgr->resume_work);
482
483 mutex_lock(&uq_mgr->userq_mutex);
484 ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
485 if (dma_fence_is_signaled(ev_fence)) {
486 dma_fence_put(ev_fence);
487 mutex_unlock(&uq_mgr->userq_mutex);
488 /*
489 * Looks like there was no pending resume work,
490 * add one now to create a valid eviction fence
491 */
492 schedule_delayed_work(&uq_mgr->resume_work, 0);
493 goto retry;
494 }
495 dma_fence_put(ev_fence);
496 }
497
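/*
 * Allocate a kernel-owned GTT BO for userqueue bookkeeping, bind it to GART,
 * CPU-map it and zero it. On success both gpu_addr and cpu_ptr of userq_obj
 * are valid.
 */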
int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
499 struct amdgpu_userq_obj *userq_obj,
500 int size)
501 {
502 struct amdgpu_device *adev = uq_mgr->adev;
503 struct amdgpu_bo_param bp;
504 int r;
505
506 memset(&bp, 0, sizeof(bp));
507 bp.byte_align = PAGE_SIZE;
508 bp.domain = AMDGPU_GEM_DOMAIN_GTT;
509 bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
510 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
511 bp.type = ttm_bo_type_kernel;
512 bp.size = size;
513 bp.resv = NULL;
514 bp.bo_ptr_size = sizeof(struct amdgpu_bo);
515
516 r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
517 if (r) {
518 drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
519 return r;
520 }
521
522 r = amdgpu_bo_reserve(userq_obj->obj, true);
523 if (r) {
524 drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
525 goto free_obj;
526 }
527
528 r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
529 if (r) {
530 drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
531 goto unresv;
532 }
533
534 r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
535 if (r) {
536 drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
537 goto unresv;
538 }
539
540 userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
541 amdgpu_bo_unreserve(userq_obj->obj);
542 memset(userq_obj->cpu_ptr, 0, size);
543 return 0;
544
545 unresv:
546 amdgpu_bo_unreserve(userq_obj->obj);
547
548 free_obj:
549 amdgpu_bo_unref(&userq_obj->obj);
550 return r;
551 }
552
void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
554 struct amdgpu_userq_obj *userq_obj)
555 {
556 amdgpu_bo_kunmap(userq_obj->obj);
557 amdgpu_bo_unref(&userq_obj->obj);
558 }
559
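/*
 * Look up the doorbell BO from its GEM handle, pin it, validate that the
 * requested doorbell offset lies inside the BO and translate it into an
 * absolute index on the doorbell BAR. Returns a negative errno on failure.
 */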
560 uint64_t
amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
562 struct amdgpu_db_info *db_info,
563 struct drm_file *filp)
564 {
565 uint64_t index;
566 struct drm_gem_object *gobj;
567 struct amdgpu_userq_obj *db_obj = db_info->db_obj;
568 int r, db_size;
569
570 gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
571 if (gobj == NULL) {
572 drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
573 return -EINVAL;
574 }
575
576 db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
577 drm_gem_object_put(gobj);
578
579 r = amdgpu_bo_reserve(db_obj->obj, true);
580 if (r) {
drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
582 goto unref_bo;
583 }
584
585 /* Pin the BO before generating the index, unpin in queue destroy */
586 r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
587 if (r) {
588 drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
589 goto unresv_bo;
590 }
591
592 switch (db_info->queue_type) {
593 case AMDGPU_HW_IP_GFX:
594 case AMDGPU_HW_IP_COMPUTE:
595 case AMDGPU_HW_IP_DMA:
596 db_size = sizeof(u64);
597 break;
598 default:
drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
600 db_info->queue_type);
601 r = -EINVAL;
602 goto unpin_bo;
603 }
604
605 /* Validate doorbell_offset is within the doorbell BO */
606 if ((u64)db_info->doorbell_offset * db_size + db_size >
607 amdgpu_bo_size(db_obj->obj)) {
608 r = -EINVAL;
609 goto unpin_bo;
610 }
611
612 index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
613 db_info->doorbell_offset, db_size);
614 drm_dbg_driver(adev_to_drm(uq_mgr->adev),
615 "[Usermode queues] doorbell index=%lld\n", index);
616 amdgpu_bo_unreserve(db_obj->obj);
617 return index;
618
619 unpin_bo:
620 amdgpu_bo_unpin(db_obj->obj);
621 unresv_bo:
622 amdgpu_bo_unreserve(db_obj->obj);
623 unref_bo:
624 amdgpu_bo_unref(&db_obj->obj);
625 return r;
626 }
627
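/*
 * Tear a queue down completely: cancel pending resume and hang detection
 * work, drop the tracked VA mappings, wait for the last fence, unmap the
 * queue from the hardware and release the doorbell and wptr BOs.
 */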
628 static int
amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue)
630 {
631 struct amdgpu_device *adev = uq_mgr->adev;
632 struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
633 struct amdgpu_vm *vm = &fpriv->vm;
634
635 int r = 0;
636
637 cancel_delayed_work_sync(&uq_mgr->resume_work);
638
639 /* Cancel any pending hang detection work and cleanup */
640 cancel_delayed_work_sync(&queue->hang_detect_work);
641
642 r = amdgpu_bo_reserve(vm->root.bo, false);
643 if (r) {
644 drm_file_err(uq_mgr->file, "Failed to reserve root bo during userqueue destroy\n");
645 return r;
646 }
647 amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
648 amdgpu_bo_unreserve(vm->root.bo);
649
650 mutex_lock(&uq_mgr->userq_mutex);
651 queue->hang_detect_fence = NULL;
652 amdgpu_userq_wait_for_last_fence(queue);
653
654 #if defined(CONFIG_DEBUG_FS)
655 debugfs_remove_recursive(queue->debugfs_queue);
656 #endif
657 amdgpu_userq_detect_and_reset_queues(uq_mgr);
658 r = amdgpu_userq_unmap_helper(queue);
659 atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
660 amdgpu_userq_cleanup(queue);
661 mutex_unlock(&uq_mgr->userq_mutex);
662
663 amdgpu_bo_reserve(queue->db_obj.obj, true);
664 amdgpu_bo_unpin(queue->db_obj.obj);
665 amdgpu_bo_unreserve(queue->db_obj.obj);
666 amdgpu_bo_unref(&queue->db_obj.obj);
667
668 amdgpu_bo_reserve(queue->wptr_obj.obj, true);
669 amdgpu_bo_unpin(queue->wptr_obj.obj);
670 amdgpu_bo_unreserve(queue->wptr_obj.obj);
671 amdgpu_bo_unref(&queue->wptr_obj.obj);
672 kfree(queue);
673
674 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
675
676 return r;
677 }
678
static void amdgpu_userq_kref_destroy(struct kref *kref)
680 {
681 int r;
682 struct amdgpu_usermode_queue *queue =
683 container_of(kref, struct amdgpu_usermode_queue, refcount);
684 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
685
686 r = amdgpu_userq_destroy(uq_mgr, queue);
687 if (r)
688 drm_file_err(uq_mgr->file, "Failed to destroy usermode queue %d\n", r);
689 }
690
struct amdgpu_usermode_queue *amdgpu_userq_get(struct amdgpu_userq_mgr *uq_mgr, u32 qid)
692 {
693 struct amdgpu_usermode_queue *queue;
694
695 xa_lock(&uq_mgr->userq_xa);
696 queue = xa_load(&uq_mgr->userq_xa, qid);
697 if (queue)
698 kref_get(&queue->refcount);
699 xa_unlock(&uq_mgr->userq_xa);
700
701 return queue;
702 }
703
void amdgpu_userq_put(struct amdgpu_usermode_queue *queue)
705 {
706 if (queue)
707 kref_put(&queue->refcount, amdgpu_userq_kref_destroy);
708 }
709
static int amdgpu_userq_priority_permit(struct drm_file *filp,
711 int priority)
712 {
713 if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
714 return 0;
715
716 if (capable(CAP_SYS_NICE))
717 return 0;
718
719 if (drm_is_current_master(filp))
720 return 0;
721
722 return -EACCES;
723 }
724
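/*
 * Backend of the create ioctl: validate the queue, rptr and wptr VAs, pin
 * the doorbell, set up the fence driver and MQD, map the queue (unless
 * scheduling is halted for enforced isolation) and publish it in the
 * per-process and per-device xarrays.
 */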
725 static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
727 {
728 struct amdgpu_fpriv *fpriv = filp->driver_priv;
729 struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
730 struct amdgpu_device *adev = uq_mgr->adev;
731 const struct amdgpu_userq_funcs *uq_funcs;
732 struct amdgpu_usermode_queue *queue;
733 struct amdgpu_db_info db_info;
734 bool skip_map_queue;
735 u32 qid;
736 uint64_t index;
737 int r = 0;
738 int priority =
739 (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
740 AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
741
742 r = amdgpu_userq_priority_permit(filp, priority);
743 if (r)
744 return r;
745
746 r = pm_runtime_resume_and_get(adev_to_drm(adev)->dev);
747 if (r < 0) {
748 drm_file_err(uq_mgr->file, "pm_runtime_resume_and_get() failed for userqueue create\n");
749 return r;
750 }
751
752 uq_funcs = adev->userq_funcs[args->in.ip_type];
753 if (!uq_funcs) {
754 drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
755 args->in.ip_type);
756 r = -EINVAL;
757 goto err_pm_runtime;
758 }
759
760 queue = kzalloc_obj(struct amdgpu_usermode_queue);
761 if (!queue) {
762 drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
763 r = -ENOMEM;
764 goto err_pm_runtime;
765 }
766
767 INIT_LIST_HEAD(&queue->userq_va_list);
768 queue->doorbell_handle = args->in.doorbell_handle;
769 queue->queue_type = args->in.ip_type;
770 queue->vm = &fpriv->vm;
771 queue->priority = priority;
772
773 db_info.queue_type = queue->queue_type;
774 db_info.doorbell_handle = queue->doorbell_handle;
775 db_info.db_obj = &queue->db_obj;
776 db_info.doorbell_offset = args->in.doorbell_offset;
777
778 queue->userq_mgr = uq_mgr;
779
/* Validate the userq virtual addresses. */
781 r = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
782 if (r)
783 goto free_queue;
784
785 if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
786 amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
787 amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
788 r = -EINVAL;
789 amdgpu_bo_unreserve(fpriv->vm.root.bo);
790 goto clean_mapping;
791 }
792 amdgpu_bo_unreserve(fpriv->vm.root.bo);
793
794 /* Convert relative doorbell offset into absolute doorbell index */
795 index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
796 if (index == (uint64_t)-EINVAL) {
797 drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
798 r = -EINVAL;
799 goto clean_mapping;
800 }
801
802 queue->doorbell_index = index;
803 xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
804 r = amdgpu_userq_fence_driver_alloc(adev, &queue->fence_drv);
805 if (r) {
806 drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
807 goto clean_mapping;
808 }
809
810 r = uq_funcs->mqd_create(queue, &args->in);
811 if (r) {
812 drm_file_err(uq_mgr->file, "Failed to create Queue\n");
813 goto clean_fence_driver;
814 }
815
816 amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
817
818 /* don't map the queue if scheduling is halted */
819 if (adev->userq_halt_for_enforce_isolation &&
820 ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
821 (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
822 skip_map_queue = true;
823 else
824 skip_map_queue = false;
825 if (!skip_map_queue) {
826 r = amdgpu_userq_map_helper(queue);
827 if (r) {
828 drm_file_err(uq_mgr->file, "Failed to map Queue\n");
829 goto clean_mqd;
830 }
831 }
832
833 /* drop this refcount during queue destroy */
834 kref_init(&queue->refcount);
835
836 /* Wait for mode-1 reset to complete */
837 down_read(&adev->reset_domain->sem);
838
839 r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
840 XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
841 if (r) {
842 if (!skip_map_queue)
843 amdgpu_userq_unmap_helper(queue);
844 r = -ENOMEM;
845 goto clean_reset_domain;
846 }
847
848 r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
849 if (r) {
850 xa_erase(&uq_mgr->userq_xa, qid);
851 if (!skip_map_queue)
852 amdgpu_userq_unmap_helper(queue);
853 goto clean_reset_domain;
854 }
855 up_read(&adev->reset_domain->sem);
856
857 amdgpu_debugfs_userq_init(filp, queue, qid);
858 amdgpu_userq_init_hang_detect_work(queue);
859
860 args->out.queue_id = qid;
861 atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
862 mutex_unlock(&uq_mgr->userq_mutex);
863 return 0;
864
865 clean_reset_domain:
866 up_read(&adev->reset_domain->sem);
867 clean_mqd:
868 mutex_unlock(&uq_mgr->userq_mutex);
869 uq_funcs->mqd_destroy(queue);
870 clean_fence_driver:
871 amdgpu_userq_fence_driver_free(queue);
872 clean_mapping:
873 amdgpu_bo_reserve(fpriv->vm.root.bo, true);
874 amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
875 amdgpu_bo_unreserve(fpriv->vm.root.bo);
876 free_queue:
877 kfree(queue);
878 err_pm_runtime:
879 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
880 return r;
881 }
882
static int amdgpu_userq_input_args_validate(struct drm_device *dev,
884 union drm_amdgpu_userq *args,
885 struct drm_file *filp)
886 {
887 struct amdgpu_device *adev = drm_to_adev(dev);
888
889 switch (args->in.op) {
890 case AMDGPU_USERQ_OP_CREATE:
891 if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
892 AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
893 return -EINVAL;
/* Usermode queues are currently supported only for GFX, Compute and SDMA IPs */
895 if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
896 args->in.ip_type != AMDGPU_HW_IP_DMA &&
897 args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
898 drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
899 args->in.ip_type);
900 return -EINVAL;
901 }
902
903 if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
904 (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
905 (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
906 !amdgpu_is_tmz(adev)) {
907 drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
908 return -EINVAL;
909 }
910
911 if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
912 args->in.queue_va == 0 ||
913 args->in.queue_size == 0) {
drm_file_err(filp, "invalid userq queue va or size\n");
915 return -EINVAL;
916 }
917
918 if (!is_power_of_2(args->in.queue_size)) {
919 drm_file_err(filp, "Queue size must be a power of 2\n");
920 return -EINVAL;
921 }
922
923 if (args->in.queue_size < AMDGPU_GPU_PAGE_SIZE) {
924 drm_file_err(filp, "Queue size smaller than AMDGPU_GPU_PAGE_SIZE\n");
925 return -EINVAL;
926 }
927
928 if (!args->in.wptr_va || !args->in.rptr_va) {
drm_file_err(filp, "invalid userq queue rptr or wptr\n");
930 return -EINVAL;
931 }
932 break;
933 case AMDGPU_USERQ_OP_FREE:
934 if (args->in.ip_type ||
935 args->in.doorbell_handle ||
936 args->in.doorbell_offset ||
937 args->in.flags ||
938 args->in.queue_va ||
939 args->in.queue_size ||
940 args->in.rptr_va ||
941 args->in.wptr_va ||
942 args->in.mqd ||
943 args->in.mqd_size)
944 return -EINVAL;
945 break;
946 default:
947 return -EINVAL;
948 }
949
950 return 0;
951 }
952
bool amdgpu_userq_enabled(struct drm_device *dev)
954 {
955 struct amdgpu_device *adev = drm_to_adev(dev);
956 int i;
957
958 for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
959 if (adev->userq_funcs[i])
960 return true;
961 }
962
963 return false;
964 }
965
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
967 struct drm_file *filp)
968 {
969 union drm_amdgpu_userq *args = data;
970 struct amdgpu_fpriv *fpriv = filp->driver_priv;
971 struct amdgpu_usermode_queue *queue;
972 int r = 0;
973
974 if (!amdgpu_userq_enabled(dev))
975 return -ENOTSUPP;
976
977 if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
978 return -EINVAL;
979
980 switch (args->in.op) {
981 case AMDGPU_USERQ_OP_CREATE:
982 r = amdgpu_userq_create(filp, args);
983 if (r)
984 drm_file_err(filp, "Failed to create usermode queue\n");
985 break;
986
987 case AMDGPU_USERQ_OP_FREE: {
988 xa_lock(&fpriv->userq_mgr.userq_xa);
989 queue = __xa_erase(&fpriv->userq_mgr.userq_xa, args->in.queue_id);
990 xa_unlock(&fpriv->userq_mgr.userq_xa);
991 if (!queue)
992 return -ENOENT;
993
994 amdgpu_userq_put(queue);
995 break;
996 }
997
998 default:
999 drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
1000 return -EINVAL;
1001 }
1002
1003 return r;
1004 }
1005
1006 static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
1008 {
1009 struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1010 struct amdgpu_vm *vm = &fpriv->vm;
1011 struct amdgpu_usermode_queue *queue;
1012 unsigned long queue_id;
1013 int ret = 0, r;
1014
1015
1016 if (amdgpu_bo_reserve(vm->root.bo, false))
1017 return false;
1018
1019 mutex_lock(&uq_mgr->userq_mutex);
1020 /* Resume all the queues for this process */
1021 xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1022
1023 if (!amdgpu_userq_buffer_vas_mapped(queue)) {
1024 drm_file_err(uq_mgr->file,
"trying to restore queue without va mapping\n");
1026 queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
1027 continue;
1028 }
1029
1030 r = amdgpu_userq_restore_helper(queue);
1031 if (r)
1032 ret = r;
1033
1034 }
1035 mutex_unlock(&uq_mgr->userq_mutex);
1036 amdgpu_bo_unreserve(vm->root.bo);
1037
1038 if (ret)
1039 drm_file_err(uq_mgr->file,
1040 "Failed to map all the queues, restore failed ret=%d\n", ret);
1041 return ret;
1042 }
1043
static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
1045 {
1046 struct ttm_operation_ctx ctx = { false, false };
1047
1048 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1049 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1050 }
1051
1052 /* Handle all BOs on the invalidated list, validate them and update the PTs */
1053 static int
amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
1055 struct amdgpu_vm *vm)
1056 {
1057 struct ttm_operation_ctx ctx = { false, false };
1058 struct amdgpu_bo_va *bo_va;
1059 struct amdgpu_bo *bo;
1060 int ret;
1061
1062 spin_lock(&vm->status_lock);
1063 while (!list_empty(&vm->invalidated)) {
1064 bo_va = list_first_entry(&vm->invalidated,
1065 struct amdgpu_bo_va,
1066 base.vm_status);
1067 spin_unlock(&vm->status_lock);
1068
1069 bo = bo_va->base.bo;
1070 ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
1071 if (unlikely(ret))
1072 return ret;
1073
1074 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1075 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1076 if (ret)
1077 return ret;
1078
1079 /* This moves the bo_va to the done list */
1080 ret = amdgpu_vm_bo_update(adev, bo_va, false);
1081 if (ret)
1082 return ret;
1083
1084 spin_lock(&vm->status_lock);
1085 }
1086 spin_unlock(&vm->status_lock);
1087
1088 return 0;
1089 }
1090
1091 /* Make sure the whole VM is ready to be used */
1092 static int
amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
1094 {
1095 struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1096 bool invalidated = false, new_addition = false;
1097 struct ttm_operation_ctx ctx = { true, false };
1098 struct amdgpu_device *adev = uq_mgr->adev;
1099 struct amdgpu_hmm_range *range;
1100 struct amdgpu_vm *vm = &fpriv->vm;
1101 unsigned long key, tmp_key;
1102 struct amdgpu_bo_va *bo_va;
1103 struct amdgpu_bo *bo;
1104 struct drm_exec exec;
1105 struct xarray xa;
1106 int ret;
1107
1108 xa_init(&xa);
1109
1110 retry_lock:
1111 drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
1112 drm_exec_until_all_locked(&exec) {
1113 ret = amdgpu_vm_lock_pd(vm, &exec, 1);
1114 drm_exec_retry_on_contention(&exec);
1115 if (unlikely(ret))
1116 goto unlock_all;
1117
1118 ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
1119 drm_exec_retry_on_contention(&exec);
1120 if (unlikely(ret))
1121 goto unlock_all;
1122
1123 /* This validates PDs, PTs and per VM BOs */
1124 ret = amdgpu_vm_validate(adev, vm, NULL,
1125 amdgpu_userq_validate_vm,
1126 NULL);
1127 if (unlikely(ret))
1128 goto unlock_all;
1129
1130 /* This locks and validates the remaining evicted BOs */
1131 ret = amdgpu_userq_bo_validate(adev, &exec, vm);
1132 drm_exec_retry_on_contention(&exec);
1133 if (unlikely(ret))
1134 goto unlock_all;
1135 }
1136
1137 if (invalidated) {
1138 xa_for_each(&xa, tmp_key, range) {
1139 bo = range->bo;
1140 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1141 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1142 if (ret)
1143 goto unlock_all;
1144
1145 amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
1146
1147 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
1148 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1149 if (ret)
1150 goto unlock_all;
1151 }
1152 invalidated = false;
1153 }
1154
1155 ret = amdgpu_vm_handle_moved(adev, vm, NULL);
1156 if (ret)
1157 goto unlock_all;
1158
1159 key = 0;
1160 /* Validate User Ptr BOs */
1161 list_for_each_entry(bo_va, &vm->done, base.vm_status) {
1162 bo = bo_va->base.bo;
1163 if (!bo)
1164 continue;
1165
1166 if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
1167 continue;
1168
1169 range = xa_load(&xa, key);
1170 if (range && range->bo != bo) {
1171 xa_erase(&xa, key);
1172 amdgpu_hmm_range_free(range);
1173 range = NULL;
1174 }
1175
1176 if (!range) {
1177 range = amdgpu_hmm_range_alloc(bo);
1178 if (!range) {
1179 ret = -ENOMEM;
1180 goto unlock_all;
1181 }
1182
1183 xa_store(&xa, key, range, GFP_KERNEL);
1184 new_addition = true;
1185 }
1186 key++;
1187 }
1188
1189 if (new_addition) {
1190 drm_exec_fini(&exec);
1191 xa_for_each(&xa, tmp_key, range) {
1192 if (!range)
1193 continue;
1194 bo = range->bo;
1195 ret = amdgpu_ttm_tt_get_user_pages(bo, range);
1196 if (ret)
1197 goto free_ranges;
1198 }
1199
1200 invalidated = true;
1201 new_addition = false;
1202 goto retry_lock;
1203 }
1204
1205 ret = amdgpu_vm_update_pdes(adev, vm, false);
1206 if (ret)
1207 goto unlock_all;
1208
1209 /*
1210 * We need to wait for all VM updates to finish before restarting the
1211 * queues. Using the done list like that is now ok since everything is
1212 * locked in place.
1213 */
1214 list_for_each_entry(bo_va, &vm->done, base.vm_status)
1215 dma_fence_wait(bo_va->last_pt_update, false);
1216 dma_fence_wait(vm->last_update, false);
1217
1218 ret = amdgpu_evf_mgr_rearm(&fpriv->evf_mgr, &exec);
1219 if (ret)
1220 drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
1221
1222 unlock_all:
1223 drm_exec_fini(&exec);
1224 free_ranges:
1225 xa_for_each(&xa, tmp_key, range) {
1226 if (!range)
1227 continue;
1228 bo = range->bo;
1229 amdgpu_hmm_range_free(range);
1230 }
1231 xa_destroy(&xa);
1232 return ret;
1233 }
1234
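/*
 * Resume work for a process: once the eviction fence has signaled,
 * revalidate the VM (including user pointer BOs) and map all of the
 * process' user queues again.
 */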
static void amdgpu_userq_restore_worker(struct work_struct *work)
1236 {
1237 struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
1238 struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
1239 struct dma_fence *ev_fence;
1240 int ret;
1241
1242 ev_fence = amdgpu_evf_mgr_get_fence(&fpriv->evf_mgr);
1243 if (!dma_fence_is_signaled(ev_fence))
1244 goto put_fence;
1245
1246 ret = amdgpu_userq_vm_validate(uq_mgr);
1247 if (ret) {
1248 drm_file_err(uq_mgr->file, "Failed to validate BOs to restore ret=%d\n", ret);
1249 goto put_fence;
1250 }
1251
1252 amdgpu_userq_restore_all(uq_mgr);
1253
1254 put_fence:
1255 dma_fence_put(ev_fence);
1256 }
1257
1258 static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
1260 {
1261 struct amdgpu_usermode_queue *queue;
1262 unsigned long queue_id;
1263 int ret = 0, r;
1264
1265 amdgpu_userq_detect_and_reset_queues(uq_mgr);
1266 /* Try to unmap all the queues in this process ctx */
1267 xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1268 r = amdgpu_userq_preempt_helper(queue);
1269 if (r)
1270 ret = r;
1271 }
1272
1273 if (ret)
1274 drm_file_err(uq_mgr->file,
1275 "Couldn't unmap all the queues, eviction failed ret=%d\n", ret);
1276 return ret;
1277 }
1278
void amdgpu_userq_reset_work(struct work_struct *work)
1280 {
1281 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
1282 userq_reset_work);
1283 struct amdgpu_reset_context reset_context;
1284
1285 memset(&reset_context, 0, sizeof(reset_context));
1286
1287 reset_context.method = AMD_RESET_METHOD_NONE;
1288 reset_context.reset_req_dev = adev;
1289 reset_context.src = AMDGPU_RESET_SRC_USERQ;
1290 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
1291 /*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
1292
1293 amdgpu_device_gpu_recover(adev, NULL, &reset_context);
1294 }
1295
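/*
 * Block until the last fence of every queue owned by this process has
 * signaled, so eviction does not pull buffers out from under running work.
 */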
1296 static void
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
1298 {
1299 struct amdgpu_usermode_queue *queue;
1300 unsigned long queue_id;
1301
1302 xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
1303 struct dma_fence *f = queue->last_fence;
1304
1305 if (!f)
1306 continue;
1307
1308 dma_fence_wait(f, false);
1309 }
1310 }
1311
1312 void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr)
1314 {
1315 /* Wait for any pending userqueue fence work to finish */
1316 amdgpu_userq_wait_for_signal(uq_mgr);
1317 amdgpu_userq_evict_all(uq_mgr);
1318 }
1319
int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
1321 struct amdgpu_device *adev)
1322 {
1323 mutex_init(&userq_mgr->userq_mutex);
1324 xa_init_flags(&userq_mgr->userq_xa, XA_FLAGS_ALLOC);
1325 userq_mgr->adev = adev;
1326 userq_mgr->file = file_priv;
1327
1328 INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
1329 return 0;
1330 }
1331
void amdgpu_userq_mgr_cancel_resume(struct amdgpu_userq_mgr *userq_mgr)
1333 {
1334 cancel_delayed_work_sync(&userq_mgr->resume_work);
1335 }
1336
void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
1338 {
1339 struct amdgpu_usermode_queue *queue;
1340 unsigned long queue_id = 0;
1341
1342 for (;;) {
1343 xa_lock(&userq_mgr->userq_xa);
1344 queue = xa_find(&userq_mgr->userq_xa, &queue_id, ULONG_MAX,
1345 XA_PRESENT);
1346 if (queue)
1347 __xa_erase(&userq_mgr->userq_xa, queue_id);
1348 xa_unlock(&userq_mgr->userq_xa);
1349
1350 if (!queue)
1351 break;
1352
1353 amdgpu_userq_put(queue);
1354 }
1355
1356 xa_destroy(&userq_mgr->userq_xa);
1357 mutex_destroy(&userq_mgr->userq_mutex);
1358 }
1359
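/*
 * Device suspend: for every user queue on the device, cancel pending resume
 * work and either preempt the queue (S0ix) or unmap it completely.
 */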
int amdgpu_userq_suspend(struct amdgpu_device *adev)
1361 {
1362 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1363 struct amdgpu_usermode_queue *queue;
1364 struct amdgpu_userq_mgr *uqm;
1365 unsigned long queue_id;
1366 int r;
1367
1368 if (!ip_mask)
1369 return 0;
1370
1371 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1372 uqm = queue->userq_mgr;
1373 cancel_delayed_work_sync(&uqm->resume_work);
1374 guard(mutex)(&uqm->userq_mutex);
1375 amdgpu_userq_detect_and_reset_queues(uqm);
1376 if (adev->in_s0ix)
1377 r = amdgpu_userq_preempt_helper(queue);
1378 else
1379 r = amdgpu_userq_unmap_helper(queue);
1380 if (r)
1381 return r;
1382 }
1383 return 0;
1384 }
1385
int amdgpu_userq_resume(struct amdgpu_device *adev)
1387 {
1388 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1389 struct amdgpu_usermode_queue *queue;
1390 struct amdgpu_userq_mgr *uqm;
1391 unsigned long queue_id;
1392 int r;
1393
1394 if (!ip_mask)
1395 return 0;
1396
1397 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1398 uqm = queue->userq_mgr;
1399 guard(mutex)(&uqm->userq_mutex);
1400 if (adev->in_s0ix)
1401 r = amdgpu_userq_restore_helper(queue);
1402 else
1403 r = amdgpu_userq_map_helper(queue);
1404 if (r)
1405 return r;
1406 }
1407
1408 return 0;
1409 }
1410
int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
1412 u32 idx)
1413 {
1414 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1415 struct amdgpu_usermode_queue *queue;
1416 struct amdgpu_userq_mgr *uqm;
1417 unsigned long queue_id;
1418 int ret = 0, r;
1419
1420 /* only need to stop gfx/compute */
1421 if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1422 return 0;
1423
1424 if (adev->userq_halt_for_enforce_isolation)
1425 dev_warn(adev->dev, "userq scheduling already stopped!\n");
1426 adev->userq_halt_for_enforce_isolation = true;
1427 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1428 uqm = queue->userq_mgr;
1429 cancel_delayed_work_sync(&uqm->resume_work);
1430 mutex_lock(&uqm->userq_mutex);
1431 if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1432 (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1433 (queue->xcp_id == idx)) {
1434 amdgpu_userq_detect_and_reset_queues(uqm);
1435 r = amdgpu_userq_preempt_helper(queue);
1436 if (r)
1437 ret = r;
1438 }
1439 mutex_unlock(&uqm->userq_mutex);
1440 }
1441
1442 return ret;
1443 }
1444
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
1446 u32 idx)
1447 {
1448 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1449 struct amdgpu_usermode_queue *queue;
1450 struct amdgpu_userq_mgr *uqm;
1451 unsigned long queue_id;
1452 int ret = 0, r;
1453
/* only need to start gfx/compute */
1455 if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
1456 return 0;
1457
1458 if (!adev->userq_halt_for_enforce_isolation)
1459 dev_warn(adev->dev, "userq scheduling already started!\n");
1460
1461 adev->userq_halt_for_enforce_isolation = false;
1462
1463 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1464 uqm = queue->userq_mgr;
1465 mutex_lock(&uqm->userq_mutex);
1466 if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
1467 (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
1468 (queue->xcp_id == idx)) {
1469 r = amdgpu_userq_restore_helper(queue);
1470 if (r)
1471 ret = r;
1472 }
1473 mutex_unlock(&uqm->userq_mutex);
1474 }
1475
1476 return ret;
1477 }
1478
void amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
1480 struct amdgpu_bo_va_mapping *mapping,
1481 uint64_t saddr)
1482 {
1483 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
1484 struct amdgpu_bo_va *bo_va = mapping->bo_va;
1485 struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
1486
1487 if (!ip_mask)
1488 return;
1489
dev_warn_once(adev->dev, "unmapping a va still in use by a user queue: 0x%llx\n", saddr);
/*
 * The reservation of a userq VA mapping should include the eviction fence.
 * If the eviction fence cannot signal during the unmap, the driver warns to
 * flag this improper unmap of the userq VA.
 * Note: the eviction fence may be attached to different BOs, and this unmap
 * only covers one kind of userq VA, so assume the eviction fence is still
 * unsignaled at this point.
 */
1499 dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
1500 false, MAX_SCHEDULE_TIMEOUT);
1501 }
1502
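/*
 * Called before a GPU reset: unmap every mapped user queue, mark it hung and
 * force-complete its fences so that nothing waits across the reset. Queues
 * are remapped in amdgpu_userq_post_reset() if VRAM survived.
 */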
void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
1504 {
1505 const struct amdgpu_userq_funcs *userq_funcs;
1506 struct amdgpu_usermode_queue *queue;
1507 struct amdgpu_userq_mgr *uqm;
1508 unsigned long queue_id;
1509
1510 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1511 uqm = queue->userq_mgr;
1512 cancel_delayed_work_sync(&uqm->resume_work);
1513 if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
1514 amdgpu_userq_wait_for_last_fence(queue);
1515 userq_funcs = adev->userq_funcs[queue->queue_type];
1516 userq_funcs->unmap(queue);
1517 /* just mark all queues as hung at this point.
1518 * if unmap succeeds, we could map again
1519 * in amdgpu_userq_post_reset() if vram is not lost
1520 */
1521 queue->state = AMDGPU_USERQ_STATE_HUNG;
1522 amdgpu_userq_fence_driver_force_completion(queue);
1523 }
1524 }
1525 }
1526
int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
1528 {
/* if any queue state is AMDGPU_USERQ_STATE_HUNG
 * at this point, we should be able to map it again
 * and continue if vram is not lost.
 */
1533 struct amdgpu_usermode_queue *queue;
1534 const struct amdgpu_userq_funcs *userq_funcs;
1535 unsigned long queue_id;
1536 int r = 0;
1537
1538 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
1539 if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
1540 userq_funcs = adev->userq_funcs[queue->queue_type];
1541 /* Re-map queue */
1542 r = userq_funcs->map(queue);
1543 if (r) {
1544 dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id);
1545 continue;
1546 }
1547 queue->state = AMDGPU_USERQ_STATE_MAPPED;
1548 }
1549 }
1550
1551 return r;
1552 }
1553