// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_exec.h"

#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>
#include <linux/delay.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_hw_engine_group.h"
#include "xe_macros.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_svm.h"
#include "xe_vm.h"

/**
 * DOC: Execbuf (User GPU command submission)
 *
 * Execs have historically been rather complicated in DRM drivers (at least in
 * the i915) because of a few things:
 *
 * - Passing in a list of BOs which are read / written to, creating implicit
 *   syncs
 * - Binding at exec time
 * - Flow controlling the ring at exec time
 *
 * In XE we avoid all of this complication by not allowing a BO list to be
 * passed into an exec, by using the dma-buf implicit sync uAPI, by having
 * binds as separate operations, and by using the DRM scheduler to flow
 * control the ring. Let's take a deep dive into each of these.
 *
 * We can do away with a BO list by forcing the user to use in / out fences on
 * every exec rather than having the kernel track dependencies between BOs
 * (e.g. if the user knows an exec writes to a BO and reads from that BO in
 * the next exec, it is the user's responsibility to pass an in / out fence
 * between the two execs).
 *
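 * A minimal userspace sketch of that contract, assuming an already-created
 * syncobj and exec queue (field values here are illustrative, not a complete
 * submission path):
 *
 * .. code-block::
 *
 *	struct drm_xe_sync sync = {
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	// out-fence of exec 1
 *		.handle = syncobj,
 *	};
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = exec_queue,
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *		.address = batch1_addr,			// writes the BO
 *		.num_batch_buffer = 1,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);		// signals syncobj
 *
 *	sync.flags = 0;					// now an in-fence
 *	exec.address = batch2_addr;			// reads the BO
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);		// waits on syncobj
 *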
 * We do not allow a user to trigger a bind at exec time; rather, we have a VM
 * bind IOCTL which uses the same in / out fence interface as exec. In that
 * sense, a VM bind is basically the same operation as an exec from the user's
 * perspective, e.g. if an exec depends on a VM bind, use the in / out fence
 * interface (struct drm_xe_sync) to synchronize, just like when syncing
 * between two dependent execs.
 *
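 * A hedged sketch of that pattern (the bind op fields are elided; only the
 * sync plumbing is the point here):
 *
 * .. code-block::
 *
 *	struct drm_xe_sync sync = {
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	// bind's out-fence
 *		.handle = syncobj,
 *	};
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		// .bind = { ... } map op elided for brevity
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);		// async bind
 *	sync.flags = 0;					// exec's in-fence
 *	// then pass &sync to the exec exactly as in the sketch above
 *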
 * Although a user cannot trigger a bind, we still have to rebind userptrs in
 * the VM that have been invalidated since the last exec; likewise, we also
 * have to rebind BOs that have been evicted by the kernel. We schedule these
 * rebinds behind any pending kernel operations on any external BOs in the VM
 * or any BOs private to the VM. This is accomplished by the rebinds waiting
 * on a BO's DMA_RESV_USAGE_KERNEL slot (kernel ops) and by kernel ops waiting
 * on all of a BO's slots (in-flight execs are in the DMA_RESV_USAGE_BOOKKEEP
 * slot for both private and external BOs).
 *
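 * In dma-resv terms the ordering looks roughly like this (illustrative, not
 * the driver's exact call sites):
 *
 * .. code-block::
 *
 *	// rebind job: runs only after pending kernel ops on the reservation
 *	err = drm_sched_job_add_resv_dependencies(&rebind_job->drm,
 *						  xe_vm_resv(vm),
 *						  DMA_RESV_USAGE_KERNEL);
 *	// exec job: published in the bookkeep slot, so only waiters that
 *	// explicitly ask for DMA_RESV_USAGE_BOOKKEEP (e.g. kernel ops such
 *	// as eviction) will see and wait on it
 *	dma_resv_add_fence(xe_vm_resv(vm), &job->drm.s_fence->finished,
 *			   DMA_RESV_USAGE_BOOKKEEP);
 *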
 * Rebinds / dma-resv usage applies only to non-compute mode VMs; for compute
 * mode VMs we use preempt fences and a rebind worker (TODO: add link).
 *
 * There is no need to flow control the ring in the exec as we write the ring
 * at submission time and set the DRM scheduler max job limit to SIZE_OF_RING /
 * MAX_JOB_SIZE. The DRM scheduler will then hold all jobs until space in the
 * ring is available.
 *
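 * As a rough sketch of the arithmetic (SIZE_OF_RING and MAX_JOB_SIZE stand in
 * for the per-queue ring size and the worst-case ring footprint of one job;
 * the names are conceptual, not the driver's):
 *
 * .. code-block::
 *
 *	// the scheduler is created with this many job credits, so any job it
 *	// dequeues is guaranteed to find ring space at write time, e.g. a
 *	// 16 KiB ring with a 256 byte worst-case job gives 64 credits
 *	u32 max_active_jobs = SIZE_OF_RING / MAX_JOB_SIZE;
 *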
 * All of this results in a rather simple exec implementation.
 *
 * Flow
 * ~~~~
 *
 * .. code-block::
 *
 *	Parse input arguments
 *	Wait for any async VM bind passed as in-fences to start
 *	<----------------------------------------------------------------------|
 *	Lock global VM lock in read mode                                       |
 *	Pin userptrs (also finds userptr invalidated since last exec)          |
 *	Lock exec (VM dma-resv lock, external BOs dma-resv locks)              |
 *	Validate BOs that have been evicted                                    |
 *	Create job                                                             |
 *	Rebind invalidated userptrs + evicted BOs (non-compute-mode)           |
 *	Add rebind fence dependency to job                                     |
 *	Add job VM dma-resv bookkeeping slot (non-compute mode)                |
 *	Add job to external BOs dma-resv write slots (non-compute mode)        |
 *	Check if any userptrs invalidated since pin ------ Drop locks ---------|
 *	Install in / out fences for job
 *	Submit job
 *	Unlock all
 */

/*
 * Add validation and rebinding to the drm_exec locking loop, since both can
 * trigger eviction which may require sleeping dma_resv locks.
 */
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
	struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
	int ret;

	/* The fence slot added here is intended for the exec sched job. */
	xe_vm_set_validation_exec(vm, &vm_exec->exec);
	ret = xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
	xe_vm_set_validation_exec(vm, NULL);
	return ret;
}

int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec *args = data;
	struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
	u64 __user *addresses_user = u64_to_user_ptr(args->address);
	struct xe_exec_queue *q;
	struct xe_sync_entry *syncs = NULL;
	u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
	struct drm_exec *exec = &vm_exec.exec;
	u32 i, num_syncs, num_ufence = 0;
	struct xe_validation_ctx ctx;
	struct xe_sched_job *job;
	struct xe_vm *vm;
	bool write_locked;
	int err = 0;
	struct xe_hw_engine_group *group;
	enum xe_hw_engine_group_execution_mode mode, previous_mode;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
			 q->width != args->num_batch_buffer)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
		err = -ECANCELED;
		goto err_exec_queue;
	}

	if (args->num_syncs) {
		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
		if (!syncs) {
			err = -ENOMEM;
			goto err_exec_queue;
		}
	}

	vm = q->vm;

	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
					  &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC |
					  (xe_vm_in_lr_mode(vm) ?
					   SYNC_PARSE_FLAG_LR_MODE : 0));
		if (err)
			goto err_syncs;

		if (xe_sync_is_ufence(&syncs[num_syncs]))
			num_ufence++;
	}

	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
		err = -EINVAL;
		goto err_syncs;
	}

	if (xe_exec_queue_is_parallel(q)) {
		err = copy_from_user(addresses, addresses_user, sizeof(u64) *
				     q->width);
		if (err) {
			err = -EFAULT;
			goto err_syncs;
		}
	}

	group = q->hwe->hw_engine_group;
	mode = xe_hw_engine_group_find_exec_mode(q);

	if (mode == EXEC_MODE_DMA_FENCE) {
		err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
		if (err)
			goto err_syncs;
	}

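	/*
	 * Repin / retry loop: if a userptr is found to have been invalidated
	 * after pinning (checked under the notifier lock further down), all
	 * locks are dropped and we come back here with -EAGAIN.
	 */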
retry:
	if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
		err = down_write_killable(&vm->lock);
		write_locked = true;
	} else {
		/* We don't allow execs while the VM is in error state */
		err = down_read_interruptible(&vm->lock);
		write_locked = false;
	}
	if (err)
		goto err_hw_exec_mode;

	if (write_locked) {
		err = xe_vm_userptr_pin(vm);
		downgrade_write(&vm->lock);
		write_locked = false;
		if (err)
			goto err_unlock_list;
	}

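	/*
	 * An exec with no batch buffers submits no job. In dma-fence mode it
	 * still threads the supplied in / out fences through the queue's last
	 * fence, which lets userspace serialize against prior submissions.
	 */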
	if (!args->num_batch_buffer) {
		err = xe_vm_lock(vm, true);
		if (err)
			goto err_unlock_list;

		if (!xe_vm_in_lr_mode(vm)) {
			struct dma_fence *fence;

			fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
			if (IS_ERR(fence)) {
				err = PTR_ERR(fence);
				xe_vm_unlock(vm);
				goto err_unlock_list;
			}
			for (i = 0; i < num_syncs; i++)
				xe_sync_entry_signal(&syncs[i], fence);
			xe_exec_queue_last_fence_set(q, vm, fence);
			dma_fence_put(fence);
		}

		xe_vm_unlock(vm);
		goto err_unlock_list;
	}

	/*
	 * It's OK to block interruptible here with the vm lock held, since
	 * on task freezing during suspend / hibernate, the call will
	 * return -ERESTARTSYS and the IOCTL will be rerun.
	 */
	err = xe_pm_block_on_suspend(xe);
	if (err)
		goto err_unlock_list;

	if (!xe_vm_in_lr_mode(vm)) {
		vm_exec.vm = &vm->gpuvm;
		vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
		err = xe_validation_exec_lock(&ctx, &vm_exec, &xe->val);
		if (err)
			goto err_unlock_list;
	}

	if (xe_vm_is_closed_or_banned(q->vm)) {
		drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
		err = -ECANCELED;
		goto err_exec;
	}

	if (xe_exec_queue_uses_pxp(q)) {
		err = xe_vm_validate_protected(q->vm);
		if (err)
			goto err_exec;
	}

	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
				  addresses : &args->address);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto err_exec;
	}

	/* Wait behind rebinds */
	if (!xe_vm_in_lr_mode(vm)) {
		err = xe_sched_job_add_deps(job,
					    xe_vm_resv(vm),
					    DMA_RESV_USAGE_KERNEL);
		if (err)
			goto err_put_job;
	}

	for (i = 0; i < num_syncs && !err; i++)
		err = xe_sync_entry_add_deps(&syncs[i], job);
	if (err)
		goto err_put_job;

	if (!xe_vm_in_lr_mode(vm)) {
		err = xe_sched_job_last_fence_add_dep(job, vm);
		if (err)
			goto err_put_job;

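		/*
		 * With the notifier lock held, recheck for userptr
		 * invalidations that raced the pin above; -EAGAIN unwinds
		 * through err_repin and restarts the retry loop.
		 */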
		err = xe_svm_notifier_lock_interruptible(vm);
		if (err)
			goto err_put_job;

		err = __xe_vm_userptr_needs_repin(vm);
		if (err)
			goto err_repin;
	}

	/*
	 * Point of no return: if we error after this point, just set an error
	 * on the job and let the DRM scheduler / backend clean up the job.
	 */
	xe_sched_job_arm(job);
	if (!xe_vm_in_lr_mode(vm))
		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
					 DMA_RESV_USAGE_BOOKKEEP,
					 DMA_RESV_USAGE_BOOKKEEP);

	for (i = 0; i < num_syncs; i++) {
		xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
		xe_sched_job_init_user_fence(job, &syncs[i]);
	}

	if (!xe_vm_in_lr_mode(vm))
		xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
	xe_sched_job_push(job);
	xe_vm_reactivate_rebind(vm);

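	/* Bump the VM's BOs to the LRU tail: they are in active use again */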
	if (!err && !xe_vm_in_lr_mode(vm)) {
		spin_lock(&xe->ttm.lru_lock);
		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
		spin_unlock(&xe->ttm.lru_lock);
	}

	if (mode == EXEC_MODE_LR)
		xe_hw_engine_group_resume_faulting_lr_jobs(group);

err_repin:
	if (!xe_vm_in_lr_mode(vm))
		xe_svm_notifier_unlock(vm);
err_put_job:
	if (err)
		xe_sched_job_put(job);
err_exec:
	if (!xe_vm_in_lr_mode(vm))
		xe_validation_ctx_fini(&ctx);
err_unlock_list:
	up_read(&vm->lock);
	if (err == -EAGAIN)
		goto retry;
err_hw_exec_mode:
	if (mode == EXEC_MODE_DMA_FENCE)
		xe_hw_engine_group_put(group);
err_syncs:
	while (num_syncs--)
		xe_sync_entry_cleanup(&syncs[num_syncs]);
	kfree(syncs);
err_exec_queue:
	xe_exec_queue_put(q);

	return err;
}