xref: /linux/drivers/gpu/drm/xe/xe_exec.c (revision 8cdcef1c2f82d207aa8b2a02298fbc17191c6261)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_exec.h"

#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>
#include <linux/delay.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_macros.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_vm.h"

/**
 * DOC: Execbuf (User GPU command submission)
 *
 * Execs have historically been rather complicated in DRM drivers (at least in
 * the i915) because of a few things:
 *
 * - Passing in a list of BOs which are read / written to, creating implicit
 *   syncs
 * - Binding at exec time
 * - Flow controlling the ring at exec time
 *
 * In XE we avoid all of this complication by not allowing a BO list to be
 * passed into an exec, using the dma-buf implicit sync uAPI, having binds as
 * separate operations, and using the DRM scheduler to flow control the ring.
 * Let's deep dive into each of these.
 *
 * We can get away from a BO list by forcing the user to use in / out fences on
 * every exec rather than the kernel tracking dependencies of BOs (e.g. if the
 * user knows an exec writes to a BO and reads from the BO in the next exec, it
 * is the user's responsibility to pass an in / out fence between the two
 * execs).
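 *
 * A minimal userspace-side sketch of that chaining: exec N asks for a syncobj
 * to be signalled (out fence) and exec N + 1 waits on it (in fence). The
 * struct drm_xe_exec field names are the ones this IOCTL consumes; the
 * struct drm_xe_sync members and flag selection are illustrative only, see
 * xe_drm.h for the exact uAPI at this revision.
 *
 * .. code-block::
 *
 *	struct drm_xe_sync sync = {
 *		.handle = syncobj,
 *		// plus .flags selecting syncobj type and signal vs. wait
 *	};
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = queue_id,
 *		.num_batch_buffer = 1,
 *		.address = batch_addr,
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);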
 *
 * Implicit dependencies for external BOs are handled by using the dma-buf
 * implicit dependency uAPI (TODO: add link). To make this work, each exec must
 * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
 * BO mapped in the VM.
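 *
 * Conceptually that is a single call per external BO, made on the job's
 * finished fence; a sketch of what xe_vm_fence_all_extobjs() does in the
 * IOCTL below, where bo_resv stands for the external BO's reservation object:
 *
 * .. code-block::
 *
 *	dma_resv_add_fence(bo_resv, &job->drm.s_fence->finished,
 *			   DMA_RESV_USAGE_WRITE);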
 *
 * We do not allow a user to trigger a bind at exec time; rather, we have a VM
 * bind IOCTL which uses the same in / out fence interface as exec. In that
 * sense, a VM bind is basically the same operation as an exec from the user
 * perspective, e.g. if an exec depends on a VM bind, use the in / out fence
 * interface (struct drm_xe_sync) to synchronize, just like syncing between two
 * dependent execs.
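 *
 * For example, an exec that must run after an async VM bind can simply wait on
 * the syncobj the bind was asked to signal; as above, the drm_xe_sync members
 * shown are illustrative, see xe_drm.h.
 *
 * .. code-block::
 *
 *	struct drm_xe_sync wait_bind = {
 *		.handle = bind_out_syncobj,
 *		// .flags marks this entry as a wait (no signal flag set)
 *	};
 *	// passed via drm_xe_exec.syncs / num_syncs as in the sketch above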
 *
 * Although a user cannot trigger a bind, we still have to rebind userptrs in
 * the VM that have been invalidated since the last exec; likewise we also have
 * to rebind BOs that have been evicted by the kernel. We schedule these rebinds
 * behind any pending kernel operations on any external BOs in the VM or any
 * BOs private to the VM. This is accomplished by the rebinds waiting on the
 * BOs' DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all
 * BOs' slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP slot for
 * private BOs and in DMA_RESV_USAGE_WRITE for external BOs).
 *
 * Rebinds / dma-resv usage applies to non-compute mode VMs only, as for
 * compute mode VMs we use preempt fences and a rebind worker (TODO: add link).
 *
 * There is no need to flow control the ring in the exec as we write the ring at
 * submission time and set the DRM scheduler max job limit to SIZE_OF_RING /
 * MAX_JOB_SIZE. The DRM scheduler will then hold all jobs until space in the
 * ring is available.
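 *
 * A sketch of that limit (identifier names are illustrative; the real value is
 * set up where the exec queue's DRM scheduler is initialized):
 *
 * .. code-block::
 *
 *	max_inflight_jobs = SIZE_OF_RING / MAX_JOB_SIZE;
 *	// used as the scheduler's hw submission limit, so excess jobs are
 *	// held in the scheduler queue instead of overflowing the ring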
 *
 * All of this results in a rather simple exec implementation.
 *
 * Flow
 * ~~~~
 *
 * .. code-block::
 *
 *	Parse input arguments
 *	Wait for any async VM bind passed as in-fences to start
 *	<----------------------------------------------------------------------|
 *	Lock global VM lock in read mode                                       |
 *	Pin userptrs (also finds userptr invalidated since last exec)          |
 *	Lock exec (VM dma-resv lock, external BOs dma-resv locks)              |
 *	Validate BOs that have been evicted                                    |
 *	Create job                                                             |
 *	Rebind invalidated userptrs + evicted BOs (non-compute-mode)           |
 *	Add rebind fence dependency to job                                     |
 *	Add job VM dma-resv bookkeeping slot (non-compute mode)                |
 *	Add job to external BOs dma-resv write slots (non-compute mode)        |
 *	Check if any userptrs invalidated since pin ------ Drop locks ---------|
 *	Install in / out fences for job
 *	Submit job
 *	Unlock all
 */

static int xe_exec_begin(struct drm_exec *exec, struct xe_vm *vm)
{
	struct xe_vma *vma;
	LIST_HEAD(dups);
	int err = 0;

	if (xe_vm_in_lr_mode(vm))
		return 0;

	/*
	 * 1 fence for job from exec plus a fence for each tile from a possible
	 * rebind
	 */
	err = xe_vm_lock_dma_resv(vm, exec, 1 + vm->xe->info.tile_count, true);
	if (err)
		return err;

	/*
	 * Validate BOs that have been evicted (i.e. make sure the
	 * BOs have valid placements possibly moving an evicted BO back
	 * to a location where the GPU can access it).
	 */
	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
		xe_assert(vm->xe, !xe_vma_is_null(vma));

		if (xe_vma_is_userptr(vma))
			continue;

		err = xe_bo_validate(xe_vma_bo(vma), vm, false);
		if (err)
			break;
	}

	return err;
}

int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec *args = data;
	struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
	u64 __user *addresses_user = u64_to_user_ptr(args->address);
	struct xe_exec_queue *q;
	struct xe_sync_entry *syncs = NULL;
	u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_exec exec;
	u32 i, num_syncs = 0;
	struct xe_sched_job *job;
	struct dma_fence *rebind_fence;
	struct xe_vm *vm;
	bool write_locked;
	ktime_t end = 0;
	int err = 0;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, q->width != args->num_batch_buffer)) {
		err = -EINVAL;
		goto err_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
		err = -ECANCELED;
		goto err_exec_queue;
	}

	if (args->num_syncs) {
		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
		if (!syncs) {
			err = -ENOMEM;
			goto err_exec_queue;
		}
	}

	vm = q->vm;

	for (i = 0; i < args->num_syncs; i++) {
		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
					  &syncs_user[i], true,
					  xe_vm_in_lr_mode(vm));
		if (err)
			goto err_syncs;
	}

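	/*
	 * A parallel exec queue submits one batch buffer address per logical
	 * instance (q->width), so pull the whole array in from userspace.
	 */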
	if (xe_exec_queue_is_parallel(q)) {
		err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
				       q->width);
		if (err) {
			err = -EFAULT;
			goto err_syncs;
		}
	}

retry:
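	/*
	 * Only take the VM lock in write mode when userptrs may need to be
	 * repinned; otherwise a read lock is sufficient to run the exec.
	 */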
	if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
		err = down_write_killable(&vm->lock);
		write_locked = true;
	} else {
		/* We don't allow execs while the VM is in error state */
		err = down_read_interruptible(&vm->lock);
		write_locked = false;
	}
	if (err)
		goto err_syncs;

	if (write_locked) {
		err = xe_vm_userptr_pin(vm);
		downgrade_write(&vm->lock);
		write_locked = false;
		if (err)
			goto err_unlock_list;
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		err = xe_exec_begin(&exec, vm);
		drm_exec_retry_on_contention(&exec);
		if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
			err = -EAGAIN;
			goto err_unlock_list;
		}
		if (err)
			goto err_exec;
	}

	if (xe_vm_is_closed_or_banned(q->vm)) {
		drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
		err = -ECANCELED;
		goto err_exec;
	}

	if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
		err = -EWOULDBLOCK;
		goto err_exec;
	}

	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
				  addresses : &args->address);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto err_exec;
	}

	/*
	 * Rebind any invalidated userptrs or evicted BOs in the VM, non-compute
	 * VM mode only.
	 */
	rebind_fence = xe_vm_rebind(vm, false);
	if (IS_ERR(rebind_fence)) {
		err = PTR_ERR(rebind_fence);
		goto err_put_job;
	}

	/*
	 * We store the rebind_fence in the VM so subsequent execs don't get
	 * scheduled before the rebinds of userptrs / evicted BOs are complete.
	 */
	if (rebind_fence) {
		dma_fence_put(vm->rebind_fence);
		vm->rebind_fence = rebind_fence;
	}
	if (vm->rebind_fence) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			     &vm->rebind_fence->flags)) {
			dma_fence_put(vm->rebind_fence);
			vm->rebind_fence = NULL;
		} else {
			dma_fence_get(vm->rebind_fence);
			err = drm_sched_job_add_dependency(&job->drm,
							   vm->rebind_fence);
			if (err)
				goto err_put_job;
		}
	}

	/* Wait behind munmap style rebinds */
	if (!xe_vm_in_lr_mode(vm)) {
		err = drm_sched_job_add_resv_dependencies(&job->drm,
							  xe_vm_resv(vm),
							  DMA_RESV_USAGE_KERNEL);
		if (err)
			goto err_put_job;
	}

	for (i = 0; i < num_syncs && !err; i++)
		err = xe_sync_entry_add_deps(&syncs[i], job);
	if (err)
		goto err_put_job;

	if (!xe_vm_in_lr_mode(vm)) {
		err = down_read_interruptible(&vm->userptr.notifier_lock);
		if (err)
			goto err_put_job;

		err = __xe_vm_userptr_needs_repin(vm);
		if (err)
			goto err_repin;
	}

	/*
	 * Point of no return, if we error after this point just set an error on
	 * the job and let the DRM scheduler / backend clean up the job.
	 */
	xe_sched_job_arm(job);
	if (!xe_vm_in_lr_mode(vm)) {
		/* Block userptr invalidations / BO eviction */
		dma_resv_add_fence(xe_vm_resv(vm),
				   &job->drm.s_fence->finished,
				   DMA_RESV_USAGE_BOOKKEEP);

		/*
		 * Make implicit sync work across drivers, assuming all external
		 * BOs are written as we don't pass in a read / write list.
		 */
		xe_vm_fence_all_extobjs(vm, &job->drm.s_fence->finished,
					DMA_RESV_USAGE_WRITE);
	}

	for (i = 0; i < num_syncs; i++)
		xe_sync_entry_signal(&syncs[i], job,
				     &job->drm.s_fence->finished);

	if (xe_exec_queue_is_lr(q))
		q->ring_ops->emit_job(job);
	xe_sched_job_push(job);
	xe_vm_reactivate_rebind(vm);

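	/*
	 * Bulk-move the VM's BOs to the tail of the LRU so they stay grouped
	 * together and are the last candidates for eviction.
	 */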
	if (!err && !xe_vm_in_lr_mode(vm)) {
		spin_lock(&xe->ttm.lru_lock);
		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
		spin_unlock(&xe->ttm.lru_lock);
	}

err_repin:
	if (!xe_vm_in_lr_mode(vm))
		up_read(&vm->userptr.notifier_lock);
err_put_job:
	if (err)
		xe_sched_job_put(job);
err_exec:
	drm_exec_fini(&exec);
err_unlock_list:
	if (write_locked)
		up_write(&vm->lock);
	else
		up_read(&vm->lock);
	if (err == -EAGAIN)
		goto retry;
err_syncs:
	for (i = 0; i < num_syncs; i++)
		xe_sync_entry_cleanup(&syncs[i]);
	kfree(syncs);
err_exec_queue:
	xe_exec_queue_put(q);

	return err;
}