// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_exec.h"

#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>
#include <linux/delay.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_macros.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_vm.h"

/**
 * DOC: Execbuf (User GPU command submission)
 *
 * Execs have historically been rather complicated in DRM drivers (at least in
 * i915) because of a few things:
 *
 * - Passing in a list of BOs which are read / written to, creating implicit
 *   syncs
 * - Binding at exec time
 * - Flow controlling the ring at exec time
 *
 * In XE we avoid all of this complication by not allowing a BO list to be
 * passed into an exec, using the dma-buf implicit sync uAPI, having binds as
 * separate operations, and using the DRM scheduler to flow control the ring.
 * Let's take a deeper dive into each of these.
 *
 * We can get away without a BO list by forcing the user to use in / out fences
 * on every exec rather than the kernel tracking dependencies of BOs (e.g. if
 * the user knows an exec writes to a BO and reads from that BO in the next
 * exec, it is the user's responsibility to pass an in / out fence between the
 * two execs).
 *
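 * As a rough userspace sketch (not part of this driver; it assumes the
 * struct drm_xe_sync / struct drm_xe_exec uAPI from xe_drm.h, and
 * syncobj_handle, queue_id and batch_addr are hypothetical placeholders),
 * chaining two execs through a single syncobj could look like:
 *
 * .. code-block::
 *
 *	struct drm_xe_sync sync = {
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	// out-fence of exec #1
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = queue_id,
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *		.address = batch_addr,
 *		.num_batch_buffer = 1,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);	// signals the syncobj
 *
 *	sync.flags = 0;				// same syncobj, now an in-fence
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);	// waits for the first exec
 *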
 * Implicit dependencies for external BOs are handled by using the dma-buf
 * implicit dependency uAPI (TODO: add link). To make this work, each exec must
 * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
 * BO mapped in the VM.
 *
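 * In this driver that installation happens near the end of xe_exec_ioctl()
 * below, roughly:
 *
 * .. code-block::
 *
 *	// Private BOs get a bookkeeping fence, external BOs a write fence,
 *	// so dma-buf importers see the exec as an implicit write.
 *	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
 *				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
 *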
 * We do not allow a user to trigger a bind at exec time; rather, we have a VM
 * bind IOCTL which uses the same in / out fence interface as exec. In that
 * sense, a VM bind is basically the same operation as an exec from the user's
 * perspective, e.g. if an exec depends on a VM bind, use the in / out fence
 * interface (struct drm_xe_sync) to synchronize, just like syncing between two
 * dependent execs.
 *
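 * Continuing the userspace sketch above (bind and exec are hypothetical,
 * otherwise fully initialized struct drm_xe_vm_bind / struct drm_xe_exec
 * instances), an exec that depends on a bind could be chained like:
 *
 * .. code-block::
 *
 *	sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;	// bind signals the syncobj
 *	bind.num_syncs = 1;
 *	bind.syncs = (uintptr_t)&sync;
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 *	sync.flags = 0;				// exec waits on the syncobj
 *	exec.num_syncs = 1;
 *	exec.syncs = (uintptr_t)&sync;
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 *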
 * Although a user cannot trigger a bind, we still have to rebind userptrs in
 * the VM that have been invalidated since the last exec; likewise we also have
 * to rebind BOs that have been evicted by the kernel. We schedule these rebinds
 * behind any pending kernel operations on any external BOs in the VM or any
 * BOs private to the VM. This is accomplished by the rebinds waiting on the
 * BOs' DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all
 * BOs' slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP slot for
 * private BOs and in DMA_RESV_USAGE_WRITE for external BOs).
 *
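 * The exec job itself then waits behind any such rebinds through the kernel
 * slot of the VM's reservation object (this is the "Wait behind rebinds"
 * dependency added in xe_exec_ioctl() below):
 *
 * .. code-block::
 *
 *	err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
 *				    DMA_RESV_USAGE_KERNEL);
 *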
 * Rebinds / dma-resv usage applies only to non-compute mode VMs; for compute
 * mode VMs we use preempt fences and a rebind worker (TODO: add link).
 *
 * There is no need to flow control the ring in the exec as we write the ring
 * at submission time and set the DRM scheduler max job limit to SIZE_OF_RING /
 * MAX_JOB_SIZE. The DRM scheduler will then hold all jobs until space in the
 * ring is available.
 *
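 * For example (made-up numbers, not values taken from this driver), a 16 KiB
 * ring with a 1 KiB maximum job size gives a scheduler job limit of:
 *
 * .. code-block::
 *
 *	SIZE_OF_RING / MAX_JOB_SIZE = 16384 / 1024 = 16 jobs in flight
 *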
 * All of this results in a rather simple exec implementation.
 *
 * Flow
 * ~~~~
 *
 * .. code-block::
 *
 *	Parse input arguments
 *	Wait for any async VM bind passed as in-fences to start
 *	<----------------------------------------------------------------------|
 *	Lock global VM lock in read mode                                       |
 *	Pin userptrs (also finds userptr invalidated since last exec)          |
 *	Lock exec (VM dma-resv lock, external BOs dma-resv locks)              |
 *	Validate BOs that have been evicted                                    |
 *	Create job                                                             |
 *	Rebind invalidated userptrs + evicted BOs (non-compute-mode)           |
 *	Add rebind fence dependency to job                                     |
 *	Add job VM dma-resv bookkeeping slot (non-compute mode)                |
 *	Add job to external BOs dma-resv write slots (non-compute mode)        |
 *	Check if any userptrs invalidated since pin ------ Drop locks ---------|
 *	Install in / out fences for job
 *	Submit job
 *	Unlock all
 */

/*
 * Add validation and rebinding to the drm_exec locking loop, since both can
 * trigger eviction which may require sleeping dma_resv locks.
 */
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
	struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);

	/* The fence slot added here is intended for the exec sched job. */
	return xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
}
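
/*
 * Loose sketch of how drm_gpuvm_exec_lock() drives the callback above
 * (simplified; see drm_gpuvm.c for the real loop):
 *
 *	drm_exec_until_all_locked(exec) {
 *		lock VM resv + all external BO resvs, reserve fence slots
 *		if (vm_exec->extra.fn)
 *			ret = vm_exec->extra.fn(vm_exec);	// xe_exec_fn()
 *		drm_exec_retry_on_contention(exec);
 *	}
 */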

int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec *args = data;
	struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
	u64 __user *addresses_user = u64_to_user_ptr(args->address);
	struct xe_exec_queue *q;
	struct xe_sync_entry *syncs = NULL;
	u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
	struct drm_exec *exec = &vm_exec.exec;
	u32 i, num_syncs, num_ufence = 0;
	struct xe_sched_job *job;
	struct xe_vm *vm;
	bool write_locked, skip_retry = false;
	ktime_t end = 0;
	int err = 0;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	/* The lookup took a reference, so drop it on the error paths below */
	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
			 q->width != args->num_batch_buffer)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
		err = -ECANCELED;
		goto err_exec_queue;
	}

	if (args->num_syncs) {
		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
		if (!syncs) {
			err = -ENOMEM;
			goto err_exec_queue;
		}
	}

	vm = q->vm;

	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
					  &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC |
					  (xe_vm_in_lr_mode(vm) ?
					   SYNC_PARSE_FLAG_LR_MODE : 0));
		if (err)
			goto err_syncs;

		if (xe_sync_is_ufence(&syncs[num_syncs]))
			num_ufence++;
	}

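	/* At most one user fence may be installed per exec */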
	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
		err = -EINVAL;
		goto err_syncs;
	}

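	/* A parallel exec queue takes one batch buffer address per q->width */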
	if (xe_exec_queue_is_parallel(q)) {
		err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
				       q->width);
		if (err) {
			err = -EFAULT;
			goto err_syncs;
		}
	}

retry:
	if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
		err = down_write_killable(&vm->lock);
		write_locked = true;
	} else {
		/* We don't allow execs while the VM is in error state */
		err = down_read_interruptible(&vm->lock);
		write_locked = false;
	}
	if (err)
		goto err_syncs;

	if (write_locked) {
		err = xe_vm_userptr_pin(vm);
		downgrade_write(&vm->lock);
		write_locked = false;
		if (err)
			goto err_unlock_list;
	}

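	/*
	 * A zero num_batch_buffer exec submits no job: in dma-fence mode the
	 * fence obtained here is installed in the out-syncs and recorded as
	 * the exec queue's last fence.
	 */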
	if (!args->num_batch_buffer) {
		err = xe_vm_lock(vm, true);
		if (err)
			goto err_unlock_list;

		if (!xe_vm_in_lr_mode(vm)) {
			struct dma_fence *fence;

			fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
			if (IS_ERR(fence)) {
				err = PTR_ERR(fence);
				xe_vm_unlock(vm);
				goto err_unlock_list;
			}
			for (i = 0; i < num_syncs; i++)
				xe_sync_entry_signal(&syncs[i], fence);
			xe_exec_queue_last_fence_set(q, vm, fence);
			dma_fence_put(fence);
		}

		xe_vm_unlock(vm);
		goto err_unlock_list;
	}

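	/*
	 * Lock the VM's dma-resv and all external BOs and, via xe_exec_fn(),
	 * validate evicted BOs and rebind as needed. LR mode VMs only need
	 * the drm_exec context itself.
	 */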
	vm_exec.vm = &vm->gpuvm;
	vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
	if (xe_vm_in_lr_mode(vm)) {
		drm_exec_init(exec, vm_exec.flags, 0);
	} else {
		err = drm_gpuvm_exec_lock(&vm_exec);
		if (err) {
			if (xe_vm_validate_should_retry(exec, err, &end))
				err = -EAGAIN;
			goto err_unlock_list;
		}
	}

	if (xe_vm_is_closed_or_banned(q->vm)) {
		drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
		err = -ECANCELED;
		goto err_exec;
	}

	if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
		err = -EWOULDBLOCK;	/* Aliased to -EAGAIN */
		skip_retry = true;
		goto err_exec;
	}

	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
				  addresses : &args->address);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto err_exec;
	}

	/* Wait behind rebinds */
	if (!xe_vm_in_lr_mode(vm)) {
		err = xe_sched_job_add_deps(job,
					    xe_vm_resv(vm),
					    DMA_RESV_USAGE_KERNEL);
		if (err)
			goto err_put_job;
	}

	for (i = 0; i < num_syncs && !err; i++)
		err = xe_sync_entry_add_deps(&syncs[i], job);
	if (err)
		goto err_put_job;

	if (!xe_vm_in_lr_mode(vm)) {
		err = xe_sched_job_last_fence_add_dep(job, vm);
		if (err)
			goto err_put_job;

		err = down_read_interruptible(&vm->userptr.notifier_lock);
		if (err)
			goto err_put_job;

		err = __xe_vm_userptr_needs_repin(vm);
		if (err)
			goto err_repin;
	}

	/*
	 * Point of no return: if we error after this point, just set an error
	 * on the job and let the DRM scheduler / backend clean up the job.
	 */
	xe_sched_job_arm(job);
	if (!xe_vm_in_lr_mode(vm))
		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);

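	/* Install the job's finished fence into the out-syncs / user fence */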
	for (i = 0; i < num_syncs; i++) {
		xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
		xe_sched_job_init_user_fence(job, &syncs[i]);
	}

	if (xe_exec_queue_is_lr(q))
		q->ring_ops->emit_job(job);
	if (!xe_vm_in_lr_mode(vm))
		xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
	xe_sched_job_push(job);
	xe_vm_reactivate_rebind(vm);

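	/* Bump the VM's BOs to most-recently-used in the TTM LRU */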
	if (!err && !xe_vm_in_lr_mode(vm)) {
		spin_lock(&xe->ttm.lru_lock);
		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
		spin_unlock(&xe->ttm.lru_lock);
	}

err_repin:
	if (!xe_vm_in_lr_mode(vm))
		up_read(&vm->userptr.notifier_lock);
err_put_job:
	if (err)
		xe_sched_job_put(job);
err_exec:
	drm_exec_fini(exec);
err_unlock_list:
	up_read(&vm->lock);
	if (err == -EAGAIN && !skip_retry)
		goto retry;
err_syncs:
	while (num_syncs--)
		xe_sync_entry_cleanup(&syncs[num_syncs]);
	kfree(syncs);
err_exec_queue:
	xe_exec_queue_put(q);

	return err;
}