xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c (revision b0f84a84fff180718995b1269da2988e5b28be42)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2008,2010 Intel Corporation
5  */
6 
7 #include <linux/intel-iommu.h>
8 #include <linux/reservation.h>
9 #include <linux/sync_file.h>
10 #include <linux/uaccess.h>
11 
12 #include <drm/drm_syncobj.h>
13 #include <drm/i915_drm.h>
14 
15 #include "display/intel_frontbuffer.h"
16 
17 #include "gem/i915_gem_ioctls.h"
18 #include "gt/intel_context.h"
19 #include "gt/intel_gt_pm.h"
20 
21 #include "i915_gem_ioctls.h"
22 #include "i915_gem_clflush.h"
23 #include "i915_gem_context.h"
24 #include "i915_trace.h"
25 #include "intel_drv.h"
26 
27 enum {
28 	FORCE_CPU_RELOC = 1,
29 	FORCE_GTT_RELOC,
30 	FORCE_GPU_RELOC,
31 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
32 };
33 
34 #define __EXEC_OBJECT_HAS_REF		BIT(31)
35 #define __EXEC_OBJECT_HAS_PIN		BIT(30)
36 #define __EXEC_OBJECT_HAS_FENCE		BIT(29)
37 #define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
38 #define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
39 #define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
40 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
41 
42 #define __EXEC_HAS_RELOC	BIT(31)
43 #define __EXEC_VALIDATED	BIT(30)
44 #define __EXEC_INTERNAL_FLAGS	(~0u << 30)
45 #define UPDATE			PIN_OFFSET_FIXED
46 
47 #define BATCH_OFFSET_BIAS (256*1024)
48 
49 #define __I915_EXEC_ILLEGAL_FLAGS \
50 	(__I915_EXEC_UNKNOWN_FLAGS | \
51 	 I915_EXEC_CONSTANTS_MASK  | \
52 	 I915_EXEC_RESOURCE_STREAMER)
53 
54 /* Catch emission of unexpected errors for CI! */
55 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
56 #undef EINVAL
57 #define EINVAL ({ \
58 	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
59 	22; \
60 })
61 #endif
62 
63 /**
64  * DOC: User command execution
65  *
66  * Userspace submits commands to be executed on the GPU as an instruction
67  * stream within a GEM object we call a batchbuffer. These instructions may
68  * refer to other GEM objects containing auxiliary state such as kernels,
69  * samplers, render targets and even secondary batchbuffers. Userspace does
70  * not know where in the GPU memory these objects reside and so before the
71  * batchbuffer is passed to the GPU for execution, those addresses in the
72  * batchbuffer and auxiliary objects are updated. This is known as relocation,
73  * or patching. To try and avoid having to relocate each object on the next
74  * execution, userspace is told the location of those objects in this pass,
75  * but this remains just a hint as the kernel may choose a new location for
76  * any object in the future.
77  *
78  * At the level of talking to the hardware, submitting a batchbuffer for the
79  * GPU to execute amounts to adding content to a buffer from which the HW
80  * command streamer is reading.
81  *
82  * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
83  *    Execlists, this command is not placed on the same buffer as the
84  *    remaining items.
85  *
86  * 2. Add a command to invalidate caches to the buffer.
87  *
88  * 3. Add a batchbuffer start command to the buffer; the start command is
89  *    essentially a token together with the GPU address of the batchbuffer
90  *    to be executed.
91  *
92  * 4. Add a pipeline flush to the buffer.
93  *
94  * 5. Add a memory write command to the buffer to record when the GPU
95  *    is done executing the batchbuffer. The memory write writes the
96  *    global sequence number of the request, ``i915_request::global_seqno``;
97  *    the i915 driver uses the current value in the register to determine
98  *    if the GPU has completed the batchbuffer.
99  *
100  * 6. Add a user interrupt command to the buffer. This command instructs
101  *    the GPU to issue an interrupt when the command, pipeline flush and
102  *    memory write are completed.
103  *
104  * 7. Inform the hardware of the additional commands added to the buffer
105  *    (by updating the tail pointer).
106  *
107  * Processing an execbuf ioctl is conceptually split up into a few phases.
108  *
109  * 1. Validation - Ensure all the pointers, handles and flags are valid.
110  * 2. Reservation - Assign GPU address space for every object
111  * 3. Relocation - Update any addresses to point to the final locations
112  * 4. Serialisation - Order the request with respect to its dependencies
113  * 5. Construction - Construct a request to execute the batchbuffer
114  * 6. Submission (at some point in the future execution)
115  *
116  * Reserving resources for the execbuf is the most complicated phase. We
117  * neither want to have to migrate the object in the address space, nor do
118  * we want to have to update any relocations pointing to this object. Ideally,
119  * we want to leave the object where it is and for all the existing relocations
120  * to match. If the object is given a new address, or if userspace thinks the
121  * object is elsewhere, we have to parse all the relocation entries and update
122  * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
123  * all the target addresses in all of its objects match the value in the
124  * relocation entries and that they all match the presumed offsets given by the
125  * list of execbuffer objects. Using this knowledge, we know that if we haven't
126  * moved any buffers, all the relocation entries are valid and we can skip
127  * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
128  * hang.) The requirements for using I915_EXEC_NO_RELOC are:
129  *
130  *      The addresses written in the objects must match the corresponding
131  *      reloc.presumed_offset which in turn must match the corresponding
132  *      execobject.offset.
133  *
134  *      Any render targets written to in the batch must be flagged with
135  *      EXEC_OBJECT_WRITE.
136  *
137  *      To avoid stalling, execobject.offset should match the current
138  *      address of that object within the active context.
139  *
140  * The reservation is done in multiple phases. First we try to keep any
141  * object already bound in its current location - so long as it meets the
142  * constraints imposed by the new execbuffer. Any object left unbound after the
143  * first pass is then fitted into any available idle space. If an object does
144  * not fit, all objects are removed from the reservation and the process rerun
145  * after sorting the objects into a priority order (more difficult to fit
146  * objects are tried first). Failing that, the entire VM is cleared and we try
147  * to fit the execbuf one last time before concluding that it simply will not
148  * fit.
149  *
150  * A small complication to all of this is that we allow userspace not only to
151  * specify an alignment and a size for the object in the address space, but
152  * we also allow userspace to specify the exact offset. Such objects are
153  * simpler to place (the location is known a priori); all we have to do is make
154  * sure the space is available.
155  *
156  * Once all the objects are in place, patching up the buried pointers to point
157  * to the final locations is a fairly simple job of walking over the relocation
158  * entry arrays, looking up the right address and rewriting the value into
159  * the object. Simple! ... The relocation entries are stored in user memory
160  * and so to access them we have to copy them into a local buffer. That copy
161  * has to avoid taking any pagefaults as they may lead back to a GEM object
162  * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
163  * the relocation into multiple passes. First we try to do everything within an
164  * atomic context (avoid the pagefaults) which requires that we never wait. If
165  * we detect that we may wait, or if we need to fault, then we have to fall back
166  * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
167  * bells yet?) Dropping the mutex means that we lose all the state we have
168  * built up so far for the execbuf and we must reset any global data. However,
169  * we do leave the objects pinned in their final locations - which is a
170  * potential issue for concurrent execbufs. Once we have left the mutex, we can
171  * allocate and copy all the relocation entries into a large array at our
172  * leisure, reacquire the mutex, reclaim all the objects and other state and
173  * then proceed to update any incorrect addresses with the objects.
174  *
175  * As we process the relocation entries, we maintain a record of whether the
176  * object is being written to. Using NO_RELOC, we expect userspace to provide
177  * this information instead. We also check whether we can skip the relocation
178  * by comparing the expected value inside the relocation entry with the target's
179  * final address. If they differ, we have to map the current object and rewrite
180  * the 4 or 8 byte pointer within.
181  *
182  * Serialising an execbuf is quite simple according to the rules of the GEM
183  * ABI. Execution within each context is ordered by the order of submission.
184  * Writes to any GEM object are in order of submission and are exclusive. Reads
185  * from a GEM object are unordered with respect to other reads, but ordered by
186  * writes. A write submitted after a read cannot occur before the read, and
187  * similarly any read submitted after a write cannot occur before the write.
188  * Writes are ordered between engines such that only one write occurs at any
189  * time (completing any reads beforehand) - using semaphores where available
190  * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
191  * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
192  * reads before starting, and any read (either using set-domain or pread) must
193  * flush all GPU writes before starting. (Note we only employ a barrier before;
194  * we currently rely on userspace not concurrently starting a new execution
195  * whilst reading or writing to an object. This may be an advantage or not
196  * depending on how much you trust userspace not to shoot themselves in the
197  * foot.) Serialisation may just result in the request being inserted into
198  * a DAG awaiting its turn, but the simplest approach is to wait on the CPU until
199  * all dependencies are resolved.
200  *
201  * After all of that, it is just a matter of closing the request and handing it to
202  * the hardware (well, leaving it in a queue to be executed). However, we also
203  * offer the ability for batchbuffers to be run with elevated privileges so
204  * that they can access otherwise hidden registers. (Used to adjust L3 cache etc.)
205  * Before any batch is given extra privileges we first must check that it
206  * contains no nefarious instructions: we check that each instruction is from
207  * our whitelist and all registers are also from an allowed list. We first
208  * copy the user's batchbuffer to a shadow (so that the user doesn't have
209  * access to it, either by the CPU or GPU as we scan it) and then parse each
210  * instruction. If everything is ok, we set a flag telling the hardware to run
211  * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
212  */
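
/*
 * Illustrative sketch only (not part of the driver): a minimal userspace
 * submission matching the flow described above, assuming hypothetical
 * handles and presumed offsets remembered from an earlier execbuf. With
 * I915_EXEC_NO_RELOC the kernel trusts the offsets supplied below, and the
 * batch is last in the array since I915_EXEC_BATCH_FIRST is not set.
 *
 *	struct drm_i915_gem_exec_object2 objs[2] = {
 *		{ .handle = target_handle, .offset = presumed_target_offset },
 *		{ .handle = batch_handle,  .offset = presumed_batch_offset },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)objs,
 *		.buffer_count = 2,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
 *			 I915_EXEC_HANDLE_LUT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */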
213 
214 struct i915_execbuffer {
215 	struct drm_i915_private *i915; /** i915 backpointer */
216 	struct drm_file *file; /** per-file lookup tables and limits */
217 	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
218 	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
219 	struct i915_vma **vma;
220 	unsigned int *flags;
221 
222 	struct intel_engine_cs *engine; /** engine to queue the request to */
223 	struct intel_context *context; /* logical state for the request */
224 	struct i915_gem_context *gem_context; /** caller's context */
225 	struct i915_address_space *vm; /** GTT and vma for the request */
226 
227 	struct i915_request *request; /** our request to build */
228 	struct i915_vma *batch; /** identity of the batch obj/vma */
229 
230 	/** actual size of execobj[] as we may extend it for the cmdparser */
231 	unsigned int buffer_count;
232 
233 	/** list of vma not yet bound during reservation phase */
234 	struct list_head unbound;
235 
236 	/** list of vma that have execobj.relocation_count */
237 	struct list_head relocs;
238 
239 	/**
240 	 * Track the most recently used object for relocations, as we
241 	 * frequently have to perform multiple relocations within the same
242 	 * obj/page
243 	 */
244 	struct reloc_cache {
245 		struct drm_mm_node node; /** temporary GTT binding */
246 		unsigned long vaddr; /** Current kmap address */
247 		unsigned long page; /** Currently mapped page index */
248 		unsigned int gen; /** Cached value of INTEL_GEN */
249 		bool use_64bit_reloc : 1;
250 		bool has_llc : 1;
251 		bool has_fence : 1;
252 		bool needs_unfenced : 1;
253 
254 		struct i915_request *rq;
255 		u32 *rq_cmd;
256 		unsigned int rq_size;
257 	} reloc_cache;
258 
259 	u64 invalid_flags; /** Set of execobj.flags that are invalid */
260 	u32 context_flags; /** Set of execobj.flags to insert from the ctx */
261 
262 	u32 batch_start_offset; /** Location within object of batch */
263 	u32 batch_len; /** Length of batch within object */
264 	u32 batch_flags; /** Flags composed for emit_bb_start() */
265 
266 	/**
267 	 * Indicate either the size of the hashtable used to resolve
268 	 * relocation handles, or if negative that we are using a direct
269 	 * index into the execobj[].
270 	 */
271 	int lut_size;
272 	struct hlist_head *buckets; /** ht for relocation handles */
273 };
274 
275 #define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
276 
277 /*
278  * Used to convert any address to canonical form.
279  * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
280  * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
281  * addresses to be in a canonical form:
282  * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
283  * canonical form [63:48] == [47]."
284  */
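/*
 * Worked example (illustrative): an address with bit 47 set, e.g.
 * 0x0000_8000_0000_1000, becomes 0xffff_8000_0000_1000 in canonical form,
 * while gen8_noncanonical_addr() masks bits 63:48 back off again.
 */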
285 #define GEN8_HIGH_ADDRESS_BIT 47
286 static inline u64 gen8_canonical_addr(u64 address)
287 {
288 	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
289 }
290 
291 static inline u64 gen8_noncanonical_addr(u64 address)
292 {
293 	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
294 }
295 
296 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
297 {
298 	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
299 }
300 
301 static int eb_create(struct i915_execbuffer *eb)
302 {
303 	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
304 		unsigned int size = 1 + ilog2(eb->buffer_count);
305 
306 		/*
307 		 * Without a 1:1 association between relocation handles and
308 		 * the execobject[] index, we instead create a hashtable.
309 		 * We size it dynamically based on available memory, starting
310 		 * first with a 1:1 associative hash and scaling back until
311 		 * the allocation succeeds.
312 		 *
313 		 * Later on we use a positive lut_size to indicate we are
314 		 * using this hashtable, and a negative value to indicate a
315 		 * direct lookup.
316 		 */
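		/*
		 * Worked example (illustrative): for 100 buffers,
		 * 1 + ilog2(100) = 7, so the first attempt is for
		 * 1 << 7 = 128 buckets, halving the table on each
		 * failed allocation before giving up with -ENOMEM.
		 */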
317 		do {
318 			gfp_t flags;
319 
320 			/* While we can still reduce the allocation size, don't
321 			 * raise a warning and allow the allocation to fail.
322 			 * On the last pass though, we want to try as hard
323 			 * as possible to perform the allocation and warn
324 			 * if it fails.
325 			 */
326 			flags = GFP_KERNEL;
327 			if (size > 1)
328 				flags |= __GFP_NORETRY | __GFP_NOWARN;
329 
330 			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
331 					      flags);
332 			if (eb->buckets)
333 				break;
334 		} while (--size);
335 
336 		if (unlikely(!size))
337 			return -ENOMEM;
338 
339 		eb->lut_size = size;
340 	} else {
341 		eb->lut_size = -eb->buffer_count;
342 	}
343 
344 	return 0;
345 }
346 
347 static bool
348 eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
349 		 const struct i915_vma *vma,
350 		 unsigned int flags)
351 {
352 	if (vma->node.size < entry->pad_to_size)
353 		return true;
354 
355 	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
356 		return true;
357 
358 	if (flags & EXEC_OBJECT_PINNED &&
359 	    vma->node.start != entry->offset)
360 		return true;
361 
362 	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
363 	    vma->node.start < BATCH_OFFSET_BIAS)
364 		return true;
365 
366 	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
367 	    (vma->node.start + vma->node.size - 1) >> 32)
368 		return true;
369 
370 	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
371 	    !i915_vma_is_map_and_fenceable(vma))
372 		return true;
373 
374 	return false;
375 }
376 
377 static inline bool
378 eb_pin_vma(struct i915_execbuffer *eb,
379 	   const struct drm_i915_gem_exec_object2 *entry,
380 	   struct i915_vma *vma)
381 {
382 	unsigned int exec_flags = *vma->exec_flags;
383 	u64 pin_flags;
384 
385 	if (vma->node.size)
386 		pin_flags = vma->node.start;
387 	else
388 		pin_flags = entry->offset & PIN_OFFSET_MASK;
389 
390 	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
391 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
392 		pin_flags |= PIN_GLOBAL;
393 
394 	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
395 		return false;
396 
397 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
398 		if (unlikely(i915_vma_pin_fence(vma))) {
399 			i915_vma_unpin(vma);
400 			return false;
401 		}
402 
403 		if (vma->fence)
404 			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
405 	}
406 
407 	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
408 	return !eb_vma_misplaced(entry, vma, exec_flags);
409 }
410 
411 static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
412 {
413 	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
414 
415 	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
416 		__i915_vma_unpin_fence(vma);
417 
418 	__i915_vma_unpin(vma);
419 }
420 
421 static inline void
422 eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
423 {
424 	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
425 		return;
426 
427 	__eb_unreserve_vma(vma, *flags);
428 	*flags &= ~__EXEC_OBJECT_RESERVED;
429 }
430 
431 static int
432 eb_validate_vma(struct i915_execbuffer *eb,
433 		struct drm_i915_gem_exec_object2 *entry,
434 		struct i915_vma *vma)
435 {
436 	if (unlikely(entry->flags & eb->invalid_flags))
437 		return -EINVAL;
438 
439 	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
440 		return -EINVAL;
441 
442 	/*
443 	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
444 	 * any non-page-aligned or non-canonical addresses.
445 	 */
446 	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
447 		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
448 		return -EINVAL;
449 
450 	/* pad_to_size was once a reserved field, so sanitize it */
451 	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
452 		if (unlikely(offset_in_page(entry->pad_to_size)))
453 			return -EINVAL;
454 	} else {
455 		entry->pad_to_size = 0;
456 	}
457 
458 	if (unlikely(vma->exec_flags)) {
459 		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
460 			  entry->handle, (int)(entry - eb->exec));
461 		return -EINVAL;
462 	}
463 
464 	/*
465 	 * From the drm_mm perspective the address space is continuous,
466 	 * so from this point we're always using non-canonical
467 	 * form internally.
468 	 */
469 	entry->offset = gen8_noncanonical_addr(entry->offset);
470 
471 	if (!eb->reloc_cache.has_fence) {
472 		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
473 	} else {
474 		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
475 		     eb->reloc_cache.needs_unfenced) &&
476 		    i915_gem_object_is_tiled(vma->obj))
477 			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
478 	}
479 
480 	if (!(entry->flags & EXEC_OBJECT_PINNED))
481 		entry->flags |= eb->context_flags;
482 
483 	return 0;
484 }
485 
486 static int
487 eb_add_vma(struct i915_execbuffer *eb,
488 	   unsigned int i, unsigned batch_idx,
489 	   struct i915_vma *vma)
490 {
491 	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
492 	int err;
493 
494 	GEM_BUG_ON(i915_vma_is_closed(vma));
495 
496 	if (!(eb->args->flags & __EXEC_VALIDATED)) {
497 		err = eb_validate_vma(eb, entry, vma);
498 		if (unlikely(err))
499 			return err;
500 	}
501 
502 	if (eb->lut_size > 0) {
503 		vma->exec_handle = entry->handle;
504 		hlist_add_head(&vma->exec_node,
505 			       &eb->buckets[hash_32(entry->handle,
506 						    eb->lut_size)]);
507 	}
508 
509 	if (entry->relocation_count)
510 		list_add_tail(&vma->reloc_link, &eb->relocs);
511 
512 	/*
513 	 * Stash a pointer from the vma to execobj, so we can query its flags,
514 	 * size, alignment etc as provided by the user. Also we stash a pointer
515 	 * to the vma inside the execobj so that we can use a direct lookup
516 	 * to find the right target VMA when doing relocations.
517 	 */
518 	eb->vma[i] = vma;
519 	eb->flags[i] = entry->flags;
520 	vma->exec_flags = &eb->flags[i];
521 
522 	/*
523 	 * SNA is doing fancy tricks with compressing batch buffers, which leads
524 	 * to negative relocation deltas. Usually that works out ok since the
525 	 * relocate address is still positive, except when the batch is placed
526 	 * very low in the GTT. Ensure this doesn't happen.
527 	 *
528 	 * Note that actual hangs have only been observed on gen7, but for
529 	 * paranoia do it everywhere.
530 	 */
531 	if (i == batch_idx) {
532 		if (entry->relocation_count &&
533 		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
534 			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
535 		if (eb->reloc_cache.has_fence)
536 			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
537 
538 		eb->batch = vma;
539 	}
540 
541 	err = 0;
542 	if (eb_pin_vma(eb, entry, vma)) {
543 		if (entry->offset != vma->node.start) {
544 			entry->offset = vma->node.start | UPDATE;
545 			eb->args->flags |= __EXEC_HAS_RELOC;
546 		}
547 	} else {
548 		eb_unreserve_vma(vma, vma->exec_flags);
549 
550 		list_add_tail(&vma->exec_link, &eb->unbound);
551 		if (drm_mm_node_allocated(&vma->node))
552 			err = i915_vma_unbind(vma);
553 		if (unlikely(err))
554 			vma->exec_flags = NULL;
555 	}
556 	return err;
557 }
558 
559 static inline int use_cpu_reloc(const struct reloc_cache *cache,
560 				const struct drm_i915_gem_object *obj)
561 {
562 	if (!i915_gem_object_has_struct_page(obj))
563 		return false;
564 
565 	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
566 		return true;
567 
568 	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
569 		return false;
570 
571 	return (cache->has_llc ||
572 		obj->cache_dirty ||
573 		obj->cache_level != I915_CACHE_NONE);
574 }
575 
576 static int eb_reserve_vma(const struct i915_execbuffer *eb,
577 			  struct i915_vma *vma)
578 {
579 	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
580 	unsigned int exec_flags = *vma->exec_flags;
581 	u64 pin_flags;
582 	int err;
583 
584 	pin_flags = PIN_USER | PIN_NONBLOCK;
585 	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
586 		pin_flags |= PIN_GLOBAL;
587 
588 	/*
589 	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
590 	 * limit address to the first 4GBs for unflagged objects.
591 	 */
592 	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
593 		pin_flags |= PIN_ZONE_4G;
594 
595 	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
596 		pin_flags |= PIN_MAPPABLE;
597 
598 	if (exec_flags & EXEC_OBJECT_PINNED) {
599 		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
600 		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
601 	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
602 		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
603 	}
604 
605 	err = i915_vma_pin(vma,
606 			   entry->pad_to_size, entry->alignment,
607 			   pin_flags);
608 	if (err)
609 		return err;
610 
611 	if (entry->offset != vma->node.start) {
612 		entry->offset = vma->node.start | UPDATE;
613 		eb->args->flags |= __EXEC_HAS_RELOC;
614 	}
615 
616 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
617 		err = i915_vma_pin_fence(vma);
618 		if (unlikely(err)) {
619 			i915_vma_unpin(vma);
620 			return err;
621 		}
622 
623 		if (vma->fence)
624 			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
625 	}
626 
627 	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
628 	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
629 
630 	return 0;
631 }
632 
633 static int eb_reserve(struct i915_execbuffer *eb)
634 {
635 	const unsigned int count = eb->buffer_count;
636 	struct list_head last;
637 	struct i915_vma *vma;
638 	unsigned int i, pass;
639 	int err;
640 
641 	/*
642 	 * Attempt to pin all of the buffers into the GTT.
643 	 * This is done in 3 phases:
644 	 *
645 	 * 1a. Unbind all objects that do not match the GTT constraints for
646 	 *     the execbuffer (fenceable, mappable, alignment etc).
647 	 * 1b. Increment pin count for already bound objects.
648 	 * 2.  Bind new objects.
649 	 * 3.  Decrement pin count.
650 	 *
651 	 * This avoids unnecessary unbinding of later objects in order to make
652 	 * room for the earlier objects *unless* we need to defragment.
653 	 */
654 
655 	pass = 0;
656 	err = 0;
657 	do {
658 		list_for_each_entry(vma, &eb->unbound, exec_link) {
659 			err = eb_reserve_vma(eb, vma);
660 			if (err)
661 				break;
662 		}
663 		if (err != -ENOSPC)
664 			return err;
665 
666 		/* Resort *all* the objects into priority order */
667 		INIT_LIST_HEAD(&eb->unbound);
668 		INIT_LIST_HEAD(&last);
669 		for (i = 0; i < count; i++) {
670 			unsigned int flags = eb->flags[i];
671 			struct i915_vma *vma = eb->vma[i];
672 
673 			if (flags & EXEC_OBJECT_PINNED &&
674 			    flags & __EXEC_OBJECT_HAS_PIN)
675 				continue;
676 
677 			eb_unreserve_vma(vma, &eb->flags[i]);
678 
679 			if (flags & EXEC_OBJECT_PINNED)
680 				/* Pinned objects must keep their slot */
681 				list_add(&vma->exec_link, &eb->unbound);
682 			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
683 				/* Mappable objects require the lowest 256MiB (aperture) */
684 				list_add_tail(&vma->exec_link, &eb->unbound);
685 			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
686 				/* Prioritise 4GiB region for restricted bo */
687 				list_add(&vma->exec_link, &last);
688 			else
689 				list_add_tail(&vma->exec_link, &last);
690 		}
691 		list_splice_tail(&last, &eb->unbound);
692 
693 		switch (pass++) {
694 		case 0:
695 			break;
696 
697 		case 1:
698 			/* Too fragmented, unbind everything and retry */
699 			err = i915_gem_evict_vm(eb->vm);
700 			if (err)
701 				return err;
702 			break;
703 
704 		default:
705 			return -ENOSPC;
706 		}
707 	} while (1);
708 }
709 
710 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
711 {
712 	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
713 		return 0;
714 	else
715 		return eb->buffer_count - 1;
716 }
717 
718 static int eb_select_context(struct i915_execbuffer *eb)
719 {
720 	struct i915_gem_context *ctx;
721 
722 	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
723 	if (unlikely(!ctx))
724 		return -ENOENT;
725 
726 	eb->gem_context = ctx;
727 	if (ctx->vm) {
728 		eb->vm = ctx->vm;
729 		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
730 	} else {
731 		eb->vm = &eb->i915->ggtt.vm;
732 	}
733 
734 	eb->context_flags = 0;
735 	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
736 		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
737 
738 	return 0;
739 }
740 
741 static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
742 {
743 	struct i915_request *rq;
744 
745 	/*
746 	 * Completely unscientific finger-in-the-air estimates for suitable
747 	 * maximum user request size (to avoid blocking) and then backoff.
748 	 */
749 	if (intel_ring_update_space(ring) >= PAGE_SIZE)
750 		return NULL;
751 
752 	/*
753 	 * Find a request after which, once waited upon, there will be at least half
754 	 * the ring available. The hysteresis allows us to compete for the
755 	 * shared ring and should mean that we sleep less often prior to
756 	 * claiming our resources, but not so long that the ring completely
757 	 * drains before we can submit our next request.
758 	 */
759 	list_for_each_entry(rq, &ring->request_list, ring_link) {
760 		if (__intel_ring_space(rq->postfix,
761 				       ring->emit, ring->size) > ring->size / 2)
762 			break;
763 	}
764 	if (&rq->ring_link == &ring->request_list)
765 		return NULL; /* weird, we will check again later for real */
766 
767 	return i915_request_get(rq);
768 }
769 
770 static int eb_wait_for_ring(const struct i915_execbuffer *eb)
771 {
772 	struct i915_request *rq;
773 	int ret = 0;
774 
775 	/*
776 	 * Apply a light amount of backpressure to prevent excessive hogs
777 	 * from blocking waiting for space whilst holding struct_mutex and
778 	 * keeping all of their resources pinned.
779 	 */
780 
781 	rq = __eb_wait_for_ring(eb->context->ring);
782 	if (rq) {
783 		mutex_unlock(&eb->i915->drm.struct_mutex);
784 
785 		if (i915_request_wait(rq,
786 				      I915_WAIT_INTERRUPTIBLE,
787 				      MAX_SCHEDULE_TIMEOUT) < 0)
788 			ret = -EINTR;
789 
790 		i915_request_put(rq);
791 
792 		mutex_lock(&eb->i915->drm.struct_mutex);
793 	}
794 
795 	return ret;
796 }
797 
798 static int eb_lookup_vmas(struct i915_execbuffer *eb)
799 {
800 	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
801 	struct drm_i915_gem_object *obj;
802 	unsigned int i, batch;
803 	int err;
804 
805 	if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
806 		return -EIO;
807 
808 	INIT_LIST_HEAD(&eb->relocs);
809 	INIT_LIST_HEAD(&eb->unbound);
810 
811 	batch = eb_batch_index(eb);
812 
813 	mutex_lock(&eb->gem_context->mutex);
814 	if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
815 		err = -ENOENT;
816 		goto err_ctx;
817 	}
818 
819 	for (i = 0; i < eb->buffer_count; i++) {
820 		u32 handle = eb->exec[i].handle;
821 		struct i915_lut_handle *lut;
822 		struct i915_vma *vma;
823 
824 		vma = radix_tree_lookup(handles_vma, handle);
825 		if (likely(vma))
826 			goto add_vma;
827 
828 		obj = i915_gem_object_lookup(eb->file, handle);
829 		if (unlikely(!obj)) {
830 			err = -ENOENT;
831 			goto err_vma;
832 		}
833 
834 		vma = i915_vma_instance(obj, eb->vm, NULL);
835 		if (IS_ERR(vma)) {
836 			err = PTR_ERR(vma);
837 			goto err_obj;
838 		}
839 
840 		lut = i915_lut_handle_alloc();
841 		if (unlikely(!lut)) {
842 			err = -ENOMEM;
843 			goto err_obj;
844 		}
845 
846 		err = radix_tree_insert(handles_vma, handle, vma);
847 		if (unlikely(err)) {
848 			i915_lut_handle_free(lut);
849 			goto err_obj;
850 		}
851 
852 		/* transfer ref to lut */
853 		if (!atomic_fetch_inc(&vma->open_count))
854 			i915_vma_reopen(vma);
855 		lut->handle = handle;
856 		lut->ctx = eb->gem_context;
857 
858 		i915_gem_object_lock(obj);
859 		list_add(&lut->obj_link, &obj->lut_list);
860 		i915_gem_object_unlock(obj);
861 
862 add_vma:
863 		err = eb_add_vma(eb, i, batch, vma);
864 		if (unlikely(err))
865 			goto err_vma;
866 
867 		GEM_BUG_ON(vma != eb->vma[i]);
868 		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
869 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
870 			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
871 	}
872 
873 	mutex_unlock(&eb->gem_context->mutex);
874 
875 	eb->args->flags |= __EXEC_VALIDATED;
876 	return eb_reserve(eb);
877 
878 err_obj:
879 	i915_gem_object_put(obj);
880 err_vma:
881 	eb->vma[i] = NULL;
882 err_ctx:
883 	mutex_unlock(&eb->gem_context->mutex);
884 	return err;
885 }
886 
887 static struct i915_vma *
888 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
889 {
890 	if (eb->lut_size < 0) {
891 		if (handle >= -eb->lut_size)
892 			return NULL;
893 		return eb->vma[handle];
894 	} else {
895 		struct hlist_head *head;
896 		struct i915_vma *vma;
897 
898 		head = &eb->buckets[hash_32(handle, eb->lut_size)];
899 		hlist_for_each_entry(vma, head, exec_node) {
900 			if (vma->exec_handle == handle)
901 				return vma;
902 		}
903 		return NULL;
904 	}
905 }
906 
907 static void eb_release_vmas(const struct i915_execbuffer *eb)
908 {
909 	const unsigned int count = eb->buffer_count;
910 	unsigned int i;
911 
912 	for (i = 0; i < count; i++) {
913 		struct i915_vma *vma = eb->vma[i];
914 		unsigned int flags = eb->flags[i];
915 
916 		if (!vma)
917 			break;
918 
919 		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
920 		vma->exec_flags = NULL;
921 		eb->vma[i] = NULL;
922 
923 		if (flags & __EXEC_OBJECT_HAS_PIN)
924 			__eb_unreserve_vma(vma, flags);
925 
926 		if (flags & __EXEC_OBJECT_HAS_REF)
927 			i915_vma_put(vma);
928 	}
929 }
930 
931 static void eb_reset_vmas(const struct i915_execbuffer *eb)
932 {
933 	eb_release_vmas(eb);
934 	if (eb->lut_size > 0)
935 		memset(eb->buckets, 0,
936 		       sizeof(struct hlist_head) << eb->lut_size);
937 }
938 
939 static void eb_destroy(const struct i915_execbuffer *eb)
940 {
941 	GEM_BUG_ON(eb->reloc_cache.rq);
942 
943 	if (eb->lut_size > 0)
944 		kfree(eb->buckets);
945 }
946 
947 static inline u64
948 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
949 		  const struct i915_vma *target)
950 {
951 	return gen8_canonical_addr((int)reloc->delta + target->node.start);
952 }
953 
954 static void reloc_cache_init(struct reloc_cache *cache,
955 			     struct drm_i915_private *i915)
956 {
957 	cache->page = -1;
958 	cache->vaddr = 0;
959 	/* Must be a variable in the struct to allow GCC to unroll. */
960 	cache->gen = INTEL_GEN(i915);
961 	cache->has_llc = HAS_LLC(i915);
962 	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
963 	cache->has_fence = cache->gen < 4;
964 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
965 	cache->node.allocated = false;
966 	cache->rq = NULL;
967 	cache->rq_size = 0;
968 }
969 
970 static inline void *unmask_page(unsigned long p)
971 {
972 	return (void *)(uintptr_t)(p & PAGE_MASK);
973 }
974 
975 static inline unsigned int unmask_flags(unsigned long p)
976 {
977 	return p & ~PAGE_MASK;
978 }
979 
980 #define KMAP 0x4 /* after CLFLUSH_FLAGS */
981 
982 static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
983 {
984 	struct drm_i915_private *i915 =
985 		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
986 	return &i915->ggtt;
987 }
988 
989 static void reloc_gpu_flush(struct reloc_cache *cache)
990 {
991 	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
992 	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
993 
994 	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
995 	i915_gem_object_unpin_map(cache->rq->batch->obj);
996 
997 	i915_gem_chipset_flush(cache->rq->i915);
998 
999 	i915_request_add(cache->rq);
1000 	cache->rq = NULL;
1001 }
1002 
1003 static void reloc_cache_reset(struct reloc_cache *cache)
1004 {
1005 	void *vaddr;
1006 
1007 	if (cache->rq)
1008 		reloc_gpu_flush(cache);
1009 
1010 	if (!cache->vaddr)
1011 		return;
1012 
1013 	vaddr = unmask_page(cache->vaddr);
1014 	if (cache->vaddr & KMAP) {
1015 		if (cache->vaddr & CLFLUSH_AFTER)
1016 			mb();
1017 
1018 		kunmap_atomic(vaddr);
1019 		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
1020 	} else {
1021 		wmb();
1022 		io_mapping_unmap_atomic((void __iomem *)vaddr);
1023 		if (cache->node.allocated) {
1024 			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
1025 
1026 			ggtt->vm.clear_range(&ggtt->vm,
1027 					     cache->node.start,
1028 					     cache->node.size);
1029 			drm_mm_remove_node(&cache->node);
1030 		} else {
1031 			i915_vma_unpin((struct i915_vma *)cache->node.mm);
1032 		}
1033 	}
1034 
1035 	cache->vaddr = 0;
1036 	cache->page = -1;
1037 }
1038 
1039 static void *reloc_kmap(struct drm_i915_gem_object *obj,
1040 			struct reloc_cache *cache,
1041 			unsigned long page)
1042 {
1043 	void *vaddr;
1044 
1045 	if (cache->vaddr) {
1046 		kunmap_atomic(unmask_page(cache->vaddr));
1047 	} else {
1048 		unsigned int flushes;
1049 		int err;
1050 
1051 		err = i915_gem_object_prepare_write(obj, &flushes);
1052 		if (err)
1053 			return ERR_PTR(err);
1054 
1055 		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
1056 		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
1057 
1058 		cache->vaddr = flushes | KMAP;
1059 		cache->node.mm = (void *)obj;
1060 		if (flushes)
1061 			mb();
1062 	}
1063 
1064 	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
1065 	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
1066 	cache->page = page;
1067 
1068 	return vaddr;
1069 }
1070 
1071 static void *reloc_iomap(struct drm_i915_gem_object *obj,
1072 			 struct reloc_cache *cache,
1073 			 unsigned long page)
1074 {
1075 	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
1076 	unsigned long offset;
1077 	void *vaddr;
1078 
1079 	if (cache->vaddr) {
1080 		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
1081 	} else {
1082 		struct i915_vma *vma;
1083 		int err;
1084 
1085 		if (use_cpu_reloc(cache, obj))
1086 			return NULL;
1087 
1088 		i915_gem_object_lock(obj);
1089 		err = i915_gem_object_set_to_gtt_domain(obj, true);
1090 		i915_gem_object_unlock(obj);
1091 		if (err)
1092 			return ERR_PTR(err);
1093 
1094 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1095 					       PIN_MAPPABLE |
1096 					       PIN_NONBLOCK |
1097 					       PIN_NONFAULT);
1098 		if (IS_ERR(vma)) {
1099 			memset(&cache->node, 0, sizeof(cache->node));
1100 			err = drm_mm_insert_node_in_range
1101 				(&ggtt->vm.mm, &cache->node,
1102 				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
1103 				 0, ggtt->mappable_end,
1104 				 DRM_MM_INSERT_LOW);
1105 			if (err) /* no inactive aperture space, use cpu reloc */
1106 				return NULL;
1107 		} else {
1108 			err = i915_vma_put_fence(vma);
1109 			if (err) {
1110 				i915_vma_unpin(vma);
1111 				return ERR_PTR(err);
1112 			}
1113 
1114 			cache->node.start = vma->node.start;
1115 			cache->node.mm = (void *)vma;
1116 		}
1117 	}
1118 
1119 	offset = cache->node.start;
1120 	if (cache->node.allocated) {
1121 		wmb();
1122 		ggtt->vm.insert_page(&ggtt->vm,
1123 				     i915_gem_object_get_dma_address(obj, page),
1124 				     offset, I915_CACHE_NONE, 0);
1125 	} else {
1126 		offset += page << PAGE_SHIFT;
1127 	}
1128 
1129 	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
1130 							 offset);
1131 	cache->page = page;
1132 	cache->vaddr = (unsigned long)vaddr;
1133 
1134 	return vaddr;
1135 }
1136 
1137 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
1138 			 struct reloc_cache *cache,
1139 			 unsigned long page)
1140 {
1141 	void *vaddr;
1142 
1143 	if (cache->page == page) {
1144 		vaddr = unmask_page(cache->vaddr);
1145 	} else {
1146 		vaddr = NULL;
1147 		if ((cache->vaddr & KMAP) == 0)
1148 			vaddr = reloc_iomap(obj, cache, page);
1149 		if (!vaddr)
1150 			vaddr = reloc_kmap(obj, cache, page);
1151 	}
1152 
1153 	return vaddr;
1154 }
1155 
1156 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
1157 {
1158 	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
1159 		if (flushes & CLFLUSH_BEFORE) {
1160 			clflushopt(addr);
1161 			mb();
1162 		}
1163 
1164 		*addr = value;
1165 
1166 		/*
1167 		 * Writes to the same cacheline are serialised by the CPU
1168 		 * (including clflush). On the write path, we only require
1169 		 * that it hits memory in an orderly fashion and place
1170 		 * mb barriers at the start and end of the relocation phase
1171 		 * to ensure ordering of clflush with respect to the system.
1172 		 */
1173 		if (flushes & CLFLUSH_AFTER)
1174 			clflushopt(addr);
1175 	} else
1176 		*addr = value;
1177 }
1178 
1179 static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
1180 {
1181 	struct drm_i915_gem_object *obj = vma->obj;
1182 	int err;
1183 
1184 	i915_vma_lock(vma);
1185 
1186 	if (obj->cache_dirty & ~obj->cache_coherent)
1187 		i915_gem_clflush_object(obj, 0);
1188 	obj->write_domain = 0;
1189 
1190 	err = i915_request_await_object(rq, vma->obj, true);
1191 	if (err == 0)
1192 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1193 
1194 	i915_vma_unlock(vma);
1195 
1196 	return err;
1197 }
1198 
1199 static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1200 			     struct i915_vma *vma,
1201 			     unsigned int len)
1202 {
1203 	struct reloc_cache *cache = &eb->reloc_cache;
1204 	struct drm_i915_gem_object *obj;
1205 	struct i915_request *rq;
1206 	struct i915_vma *batch;
1207 	u32 *cmd;
1208 	int err;
1209 
1210 	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
1211 	if (IS_ERR(obj))
1212 		return PTR_ERR(obj);
1213 
1214 	cmd = i915_gem_object_pin_map(obj,
1215 				      cache->has_llc ?
1216 				      I915_MAP_FORCE_WB :
1217 				      I915_MAP_FORCE_WC);
1218 	i915_gem_object_unpin_pages(obj);
1219 	if (IS_ERR(cmd))
1220 		return PTR_ERR(cmd);
1221 
1222 	batch = i915_vma_instance(obj, vma->vm, NULL);
1223 	if (IS_ERR(batch)) {
1224 		err = PTR_ERR(batch);
1225 		goto err_unmap;
1226 	}
1227 
1228 	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
1229 	if (err)
1230 		goto err_unmap;
1231 
1232 	rq = i915_request_create(eb->context);
1233 	if (IS_ERR(rq)) {
1234 		err = PTR_ERR(rq);
1235 		goto err_unpin;
1236 	}
1237 
1238 	err = reloc_move_to_gpu(rq, vma);
1239 	if (err)
1240 		goto err_request;
1241 
1242 	err = eb->engine->emit_bb_start(rq,
1243 					batch->node.start, PAGE_SIZE,
1244 					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
1245 	if (err)
1246 		goto skip_request;
1247 
1248 	i915_vma_lock(batch);
1249 	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
1250 	err = i915_vma_move_to_active(batch, rq, 0);
1251 	i915_vma_unlock(batch);
1252 	if (err)
1253 		goto skip_request;
1254 
1255 	rq->batch = batch;
1256 	i915_vma_unpin(batch);
1257 
1258 	cache->rq = rq;
1259 	cache->rq_cmd = cmd;
1260 	cache->rq_size = 0;
1261 
1262 	/* Return with batch mapping (cmd) still pinned */
1263 	return 0;
1264 
1265 skip_request:
1266 	i915_request_skip(rq, err);
1267 err_request:
1268 	i915_request_add(rq);
1269 err_unpin:
1270 	i915_vma_unpin(batch);
1271 err_unmap:
1272 	i915_gem_object_unpin_map(obj);
1273 	return err;
1274 }
1275 
1276 static u32 *reloc_gpu(struct i915_execbuffer *eb,
1277 		      struct i915_vma *vma,
1278 		      unsigned int len)
1279 {
1280 	struct reloc_cache *cache = &eb->reloc_cache;
1281 	u32 *cmd;
1282 
1283 	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
1284 		reloc_gpu_flush(cache);
1285 
1286 	if (unlikely(!cache->rq)) {
1287 		int err;
1288 
1289 		/* If we need to copy for the cmdparser, we will stall anyway */
1290 		if (eb_use_cmdparser(eb))
1291 			return ERR_PTR(-EWOULDBLOCK);
1292 
1293 		if (!intel_engine_can_store_dword(eb->engine))
1294 			return ERR_PTR(-ENODEV);
1295 
1296 		err = __reloc_gpu_alloc(eb, vma, len);
1297 		if (unlikely(err))
1298 			return ERR_PTR(err);
1299 	}
1300 
1301 	cmd = cache->rq_cmd + cache->rq_size;
1302 	cache->rq_size += len;
1303 
1304 	return cmd;
1305 }
1306 
1307 static u64
1308 relocate_entry(struct i915_vma *vma,
1309 	       const struct drm_i915_gem_relocation_entry *reloc,
1310 	       struct i915_execbuffer *eb,
1311 	       const struct i915_vma *target)
1312 {
1313 	u64 offset = reloc->offset;
1314 	u64 target_offset = relocation_target(reloc, target);
1315 	bool wide = eb->reloc_cache.use_64bit_reloc;
1316 	void *vaddr;
1317 
1318 	if (!eb->reloc_cache.vaddr &&
1319 	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
1320 	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
1321 		const unsigned int gen = eb->reloc_cache.gen;
1322 		unsigned int len;
1323 		u32 *batch;
1324 		u64 addr;
1325 
1326 		if (wide)
1327 			len = offset & 7 ? 8 : 5;
1328 		else if (gen >= 4)
1329 			len = 4;
1330 		else
1331 			len = 3;
1332 
1333 		batch = reloc_gpu(eb, vma, len);
1334 		if (IS_ERR(batch))
1335 			goto repeat;
1336 
1337 		addr = gen8_canonical_addr(vma->node.start + offset);
1338 		if (wide) {
1339 			if (offset & 7) {
1340 				*batch++ = MI_STORE_DWORD_IMM_GEN4;
1341 				*batch++ = lower_32_bits(addr);
1342 				*batch++ = upper_32_bits(addr);
1343 				*batch++ = lower_32_bits(target_offset);
1344 
1345 				addr = gen8_canonical_addr(addr + 4);
1346 
1347 				*batch++ = MI_STORE_DWORD_IMM_GEN4;
1348 				*batch++ = lower_32_bits(addr);
1349 				*batch++ = upper_32_bits(addr);
1350 				*batch++ = upper_32_bits(target_offset);
1351 			} else {
1352 				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
1353 				*batch++ = lower_32_bits(addr);
1354 				*batch++ = upper_32_bits(addr);
1355 				*batch++ = lower_32_bits(target_offset);
1356 				*batch++ = upper_32_bits(target_offset);
1357 			}
1358 		} else if (gen >= 6) {
1359 			*batch++ = MI_STORE_DWORD_IMM_GEN4;
1360 			*batch++ = 0;
1361 			*batch++ = addr;
1362 			*batch++ = target_offset;
1363 		} else if (gen >= 4) {
1364 			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1365 			*batch++ = 0;
1366 			*batch++ = addr;
1367 			*batch++ = target_offset;
1368 		} else {
1369 			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1370 			*batch++ = addr;
1371 			*batch++ = target_offset;
1372 		}
1373 
1374 		goto out;
1375 	}
1376 
1377 repeat:
1378 	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
1379 	if (IS_ERR(vaddr))
1380 		return PTR_ERR(vaddr);
1381 
1382 	clflush_write32(vaddr + offset_in_page(offset),
1383 			lower_32_bits(target_offset),
1384 			eb->reloc_cache.vaddr);
1385 
1386 	if (wide) {
1387 		offset += sizeof(u32);
1388 		target_offset >>= 32;
1389 		wide = false;
1390 		goto repeat;
1391 	}
1392 
1393 out:
1394 	return target->node.start | UPDATE;
1395 }
1396 
1397 static u64
1398 eb_relocate_entry(struct i915_execbuffer *eb,
1399 		  struct i915_vma *vma,
1400 		  const struct drm_i915_gem_relocation_entry *reloc)
1401 {
1402 	struct i915_vma *target;
1403 	int err;
1404 
1405 	/* we already hold a reference to all valid objects */
1406 	target = eb_get_vma(eb, reloc->target_handle);
1407 	if (unlikely(!target))
1408 		return -ENOENT;
1409 
1410 	/* Validate that the target is in a valid r/w GPU domain */
1411 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
1412 		DRM_DEBUG("reloc with multiple write domains: "
1413 			  "target %d offset %d "
1414 			  "read %08x write %08x",
1415 			  reloc->target_handle,
1416 			  (int) reloc->offset,
1417 			  reloc->read_domains,
1418 			  reloc->write_domain);
1419 		return -EINVAL;
1420 	}
1421 	if (unlikely((reloc->write_domain | reloc->read_domains)
1422 		     & ~I915_GEM_GPU_DOMAINS)) {
1423 		DRM_DEBUG("reloc with read/write non-GPU domains: "
1424 			  "target %d offset %d "
1425 			  "read %08x write %08x",
1426 			  reloc->target_handle,
1427 			  (int) reloc->offset,
1428 			  reloc->read_domains,
1429 			  reloc->write_domain);
1430 		return -EINVAL;
1431 	}
1432 
1433 	if (reloc->write_domain) {
1434 		*target->exec_flags |= EXEC_OBJECT_WRITE;
1435 
1436 		/*
1437 		 * Sandybridge PPGTT errata: We need a global gtt mapping
1438 		 * for MI and pipe_control writes because the gpu doesn't
1439 		 * properly redirect them through the ppgtt for non_secure
1440 		 * batchbuffers.
1441 		 */
1442 		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
1443 		    IS_GEN(eb->i915, 6)) {
1444 			err = i915_vma_bind(target, target->obj->cache_level,
1445 					    PIN_GLOBAL);
1446 			if (WARN_ONCE(err,
1447 				      "Unexpected failure to bind target VMA!"))
1448 				return err;
1449 		}
1450 	}
1451 
1452 	/*
1453 	 * If the relocation already has the right value in it, no
1454 	 * more work needs to be done.
1455 	 */
1456 	if (!DBG_FORCE_RELOC &&
1457 	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
1458 		return 0;
1459 
1460 	/* Check that the relocation address is valid... */
1461 	if (unlikely(reloc->offset >
1462 		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
1463 		DRM_DEBUG("Relocation beyond object bounds: "
1464 			  "target %d offset %d size %d.\n",
1465 			  reloc->target_handle,
1466 			  (int)reloc->offset,
1467 			  (int)vma->size);
1468 		return -EINVAL;
1469 	}
1470 	if (unlikely(reloc->offset & 3)) {
1471 		DRM_DEBUG("Relocation not 4-byte aligned: "
1472 			  "target %d offset %d.\n",
1473 			  reloc->target_handle,
1474 			  (int)reloc->offset);
1475 		return -EINVAL;
1476 	}
1477 
1478 	/*
1479 	 * If we write into the object, we need to force the synchronisation
1480 	 * barrier, either with an asynchronous clflush or if we executed the
1481 	 * patching using the GPU (though that should be serialised by the
1482 		 * timeline). To be completely sure, and since being required to
1483 		 * do relocations means we are already stalling, disable the user's opt
1484 	 * out of our synchronisation.
1485 	 */
1486 	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
1487 
1488 	/* and update the user's relocation entry */
1489 	return relocate_entry(vma, reloc, eb, target);
1490 }
1491 
1492 static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
1493 {
1494 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
1495 	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
1496 	struct drm_i915_gem_relocation_entry __user *urelocs;
1497 	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1498 	unsigned int remain;
1499 
1500 	urelocs = u64_to_user_ptr(entry->relocs_ptr);
1501 	remain = entry->relocation_count;
1502 	if (unlikely(remain > N_RELOC(ULONG_MAX)))
1503 		return -EINVAL;
1504 
1505 	/*
1506 	 * We must check that the entire relocation array is safe
1507 	 * to read. However, if the array is not writable the user loses
1508 	 * the updated relocation values.
1509 	 */
1510 	if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
1511 		return -EFAULT;
1512 
1513 	do {
1514 		struct drm_i915_gem_relocation_entry *r = stack;
1515 		unsigned int count =
1516 			min_t(unsigned int, remain, ARRAY_SIZE(stack));
1517 		unsigned int copied;
1518 
1519 		/*
1520 		 * This is the fast path and we cannot handle a pagefault
1521 		 * whilst holding the struct mutex lest the user pass in the
1522 		 * relocations contained within a mmaped bo. In such a case
1523 		 * the page fault handler would call i915_gem_fault() and
1524 		 * we would try to acquire the struct mutex again. Obviously
1525 		 * this is bad and so lockdep complains vehemently.
1526 		 */
1527 		pagefault_disable();
1528 		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
1529 		pagefault_enable();
1530 		if (unlikely(copied)) {
1531 			remain = -EFAULT;
1532 			goto out;
1533 		}
1534 
1535 		remain -= count;
1536 		do {
1537 			u64 offset = eb_relocate_entry(eb, vma, r);
1538 
1539 			if (likely(offset == 0)) {
1540 			} else if ((s64)offset < 0) {
1541 				remain = (int)offset;
1542 				goto out;
1543 			} else {
1544 				/*
1545 				 * Note that reporting an error now
1546 				 * leaves everything in an inconsistent
1547 				 * state as we have *already* changed
1548 				 * the relocation value inside the
1549 				 * object. As we have not changed the
1550 				 * reloc.presumed_offset nor will we
1551 				 * change the execobject.offset, on the
1552 				 * next call we may not rewrite the value
1553 				 * inside the object, leaving it
1554 				 * dangling and causing a GPU hang. Unless
1555 				 * userspace dynamically rebuilds the
1556 				 * relocations on each execbuf rather than
1557 				 * presume a static tree.
1558 				 *
1559 				 * We did previously check if the relocations
1560 				 * were writable (access_ok); an error now
1561 				 * would be a strange race with mprotect,
1562 				 * having already demonstrated that we
1563 				 * can read from this userspace address.
1564 				 */
1565 				offset = gen8_canonical_addr(offset & ~UPDATE);
1566 				if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
1567 					remain = -EFAULT;
1568 					goto out;
1569 				}
1570 			}
1571 		} while (r++, --count);
1572 		urelocs += ARRAY_SIZE(stack);
1573 	} while (remain);
1574 out:
1575 	reloc_cache_reset(&eb->reloc_cache);
1576 	return remain;
1577 }
1578 
1579 static int
1580 eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
1581 {
1582 	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1583 	struct drm_i915_gem_relocation_entry *relocs =
1584 		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1585 	unsigned int i;
1586 	int err;
1587 
1588 	for (i = 0; i < entry->relocation_count; i++) {
1589 		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
1590 
1591 		if ((s64)offset < 0) {
1592 			err = (int)offset;
1593 			goto err;
1594 		}
1595 	}
1596 	err = 0;
1597 err:
1598 	reloc_cache_reset(&eb->reloc_cache);
1599 	return err;
1600 }
1601 
1602 static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
1603 {
1604 	const char __user *addr, *end;
1605 	unsigned long size;
1606 	char __maybe_unused c;
1607 
1608 	size = entry->relocation_count;
1609 	if (size == 0)
1610 		return 0;
1611 
1612 	if (size > N_RELOC(ULONG_MAX))
1613 		return -EINVAL;
1614 
1615 	addr = u64_to_user_ptr(entry->relocs_ptr);
1616 	size *= sizeof(struct drm_i915_gem_relocation_entry);
1617 	if (!access_ok(addr, size))
1618 		return -EFAULT;
1619 
1620 	end = addr + size;
1621 	for (; addr < end; addr += PAGE_SIZE) {
1622 		int err = __get_user(c, addr);
1623 		if (err)
1624 			return err;
1625 	}
1626 	return __get_user(c, end - 1);
1627 }
1628 
1629 static int eb_copy_relocations(const struct i915_execbuffer *eb)
1630 {
1631 	const unsigned int count = eb->buffer_count;
1632 	unsigned int i;
1633 	int err;
1634 
1635 	for (i = 0; i < count; i++) {
1636 		const unsigned int nreloc = eb->exec[i].relocation_count;
1637 		struct drm_i915_gem_relocation_entry __user *urelocs;
1638 		struct drm_i915_gem_relocation_entry *relocs;
1639 		unsigned long size;
1640 		unsigned long copied;
1641 
1642 		if (nreloc == 0)
1643 			continue;
1644 
1645 		err = check_relocations(&eb->exec[i]);
1646 		if (err)
1647 			goto err;
1648 
1649 		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
1650 		size = nreloc * sizeof(*relocs);
1651 
1652 		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
1653 		if (!relocs) {
1654 			err = -ENOMEM;
1655 			goto err;
1656 		}
1657 
1658 		/* copy_from_user is limited to < 4GiB */
1659 		copied = 0;
1660 		do {
1661 			unsigned int len =
1662 				min_t(u64, BIT_ULL(31), size - copied);
1663 
1664 			if (__copy_from_user((char *)relocs + copied,
1665 					     (char __user *)urelocs + copied,
1666 					     len)) {
1667 end_user:
1668 				user_access_end();
1669 end:
1670 				kvfree(relocs);
1671 				err = -EFAULT;
1672 				goto err;
1673 			}
1674 
1675 			copied += len;
1676 		} while (copied < size);
1677 
1678 		/*
1679 		 * As we do not update the known relocation offsets after
1680 		 * relocating (due to the complexities in lock handling),
1681 		 * we need to mark them as invalid now so that we force the
1682 		 * relocation processing next time. Just in case the target
1683 		 * object is evicted and then rebound into its old
1684 		 * presumed_offset before the next execbuffer - if that
1685 		 * happened we would make the mistake of assuming that the
1686 		 * relocations were valid.
1687 		 */
1688 		if (!user_access_begin(urelocs, size))
1689 			goto end;
1690 
1691 		for (copied = 0; copied < nreloc; copied++)
1692 			unsafe_put_user(-1,
1693 					&urelocs[copied].presumed_offset,
1694 					end_user);
1695 		user_access_end();
1696 
1697 		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
1698 	}
1699 
1700 	return 0;
1701 
1702 err:
1703 	while (i--) {
1704 		struct drm_i915_gem_relocation_entry *relocs =
1705 			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
1706 		if (eb->exec[i].relocation_count)
1707 			kvfree(relocs);
1708 	}
1709 	return err;
1710 }
1711 
1712 static int eb_prefault_relocations(const struct i915_execbuffer *eb)
1713 {
1714 	const unsigned int count = eb->buffer_count;
1715 	unsigned int i;
1716 
1717 	if (unlikely(i915_modparams.prefault_disable))
1718 		return 0;
1719 
1720 	for (i = 0; i < count; i++) {
1721 		int err;
1722 
1723 		err = check_relocations(&eb->exec[i]);
1724 		if (err)
1725 			return err;
1726 	}
1727 
1728 	return 0;
1729 }
1730 
1731 static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
1732 {
1733 	struct drm_device *dev = &eb->i915->drm;
1734 	bool have_copy = false;
1735 	struct i915_vma *vma;
1736 	int err = 0;
1737 
1738 repeat:
1739 	if (signal_pending(current)) {
1740 		err = -ERESTARTSYS;
1741 		goto out;
1742 	}
1743 
1744 	/* We may process another execbuffer during the unlock... */
1745 	eb_reset_vmas(eb);
1746 	mutex_unlock(&dev->struct_mutex);
1747 
1748 	/*
1749 	 * We take 3 passes through the slowpath.
1750 	 *
1751 	 * 1 - we try to just prefault all the user relocation entries and
1752 	 * then attempt to reuse the atomic pagefault disabled fast path again.
1753 	 *
1754 	 * 2 - we copy the user entries to a local buffer here outside of the
1755 	 * struct_mutex and allow ourselves to wait upon any rendering before
1756 	 * performing the relocations.
1757 	 *
1758 	 * 3 - we already have a local copy of the relocation entries, but
1759 	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
1760 	 */
1761 	if (!err) {
1762 		err = eb_prefault_relocations(eb);
1763 	} else if (!have_copy) {
1764 		err = eb_copy_relocations(eb);
1765 		have_copy = err == 0;
1766 	} else {
1767 		cond_resched();
1768 		err = 0;
1769 	}
1770 	if (err) {
1771 		mutex_lock(&dev->struct_mutex);
1772 		goto out;
1773 	}
1774 
1775 	/* A frequent cause for EAGAIN is currently unavailable client pages */
1776 	flush_workqueue(eb->i915->mm.userptr_wq);
1777 
1778 	err = i915_mutex_lock_interruptible(dev);
1779 	if (err) {
1780 		mutex_lock(&dev->struct_mutex);
1781 		goto out;
1782 	}
1783 
1784 	/* reacquire the objects */
1785 	err = eb_lookup_vmas(eb);
1786 	if (err)
1787 		goto err;
1788 
1789 	GEM_BUG_ON(!eb->batch);
1790 
1791 	list_for_each_entry(vma, &eb->relocs, reloc_link) {
1792 		if (!have_copy) {
1793 			pagefault_disable();
1794 			err = eb_relocate_vma(eb, vma);
1795 			pagefault_enable();
1796 			if (err)
1797 				goto repeat;
1798 		} else {
1799 			err = eb_relocate_vma_slow(eb, vma);
1800 			if (err)
1801 				goto err;
1802 		}
1803 	}
1804 
1805 	/*
1806 	 * Leave the user relocations as they are; this is the painfully slow path,
1807 	 * and we want to avoid the complication of dropping the lock whilst
1808 	 * having buffers reserved in the aperture and so causing spurious
1809 	 * ENOSPC for random operations.
1810 	 */
1811 
1812 err:
1813 	if (err == -EAGAIN)
1814 		goto repeat;
1815 
1816 out:
1817 	if (have_copy) {
1818 		const unsigned int count = eb->buffer_count;
1819 		unsigned int i;
1820 
1821 		for (i = 0; i < count; i++) {
1822 			const struct drm_i915_gem_exec_object2 *entry =
1823 				&eb->exec[i];
1824 			struct drm_i915_gem_relocation_entry *relocs;
1825 
1826 			if (!entry->relocation_count)
1827 				continue;
1828 
1829 			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1830 			kvfree(relocs);
1831 		}
1832 	}
1833 
1834 	return err;
1835 }
1836 
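/*
 * Fast path: look up all the vmas and, if anything still needs patching,
 * apply the relocations in place. Any failure (a fault on the user
 * relocation lists, the need to wait for the GPU, etc.) punts to the
 * multi-pass eb_relocate_slow().
 */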
1837 static int eb_relocate(struct i915_execbuffer *eb)
1838 {
1839 	if (eb_lookup_vmas(eb))
1840 		goto slow;
1841 
1842 	/* The objects are in their final locations, apply the relocations. */
1843 	if (eb->args->flags & __EXEC_HAS_RELOC) {
1844 		struct i915_vma *vma;
1845 
1846 		list_for_each_entry(vma, &eb->relocs, reloc_link) {
1847 			if (eb_relocate_vma(eb, vma))
1848 				goto slow;
1849 		}
1850 	}
1851 
1852 	return 0;
1853 
1854 slow:
1855 	return eb_relocate_slow(eb);
1856 }
1857 
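/*
 * eb_move_to_gpu() locks every object's reservation (using a ww-mutex
 * acquire context for deadlock avoidance), flushes stale CPU cachelines
 * where the GPU will not snoop them, hooks up implicit fencing via
 * i915_request_await_object() and marks each vma active on the request,
 * dropping our pin/ref bookkeeping as it goes.
 */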
1858 static int eb_move_to_gpu(struct i915_execbuffer *eb)
1859 {
1860 	const unsigned int count = eb->buffer_count;
1861 	struct ww_acquire_ctx acquire;
1862 	unsigned int i;
1863 	int err = 0;
1864 
1865 	ww_acquire_init(&acquire, &reservation_ww_class);
1866 
1867 	for (i = 0; i < count; i++) {
1868 		struct i915_vma *vma = eb->vma[i];
1869 
1870 		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
1871 		if (!err)
1872 			continue;
1873 
1874 		GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
1875 
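		/*
		 * Deadlock backoff: drop every reservation lock taken so
		 * far, rotate the contended vma down to slot 0, take its
		 * lock with the slow (sleeping) ww-mutex variant and then
		 * let the loop reacquire the remaining locks in the new
		 * order.
		 */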
1876 		if (err == -EDEADLK) {
1877 			GEM_BUG_ON(i == 0);
1878 			do {
1879 				int j = i - 1;
1880 
1881 				ww_mutex_unlock(&eb->vma[j]->resv->lock);
1882 
1883 				swap(eb->flags[i], eb->flags[j]);
1884 				swap(eb->vma[i],  eb->vma[j]);
1885 				eb->vma[i]->exec_flags = &eb->flags[i];
1886 			} while (--i);
1887 			GEM_BUG_ON(vma != eb->vma[0]);
1888 			vma->exec_flags = &eb->flags[0];
1889 
1890 			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
1891 							       &acquire);
1892 		}
1893 		if (err)
1894 			break;
1895 	}
1896 	ww_acquire_done(&acquire);
1897 
1898 	while (i--) {
1899 		unsigned int flags = eb->flags[i];
1900 		struct i915_vma *vma = eb->vma[i];
1901 		struct drm_i915_gem_object *obj = vma->obj;
1902 
1903 		assert_vma_held(vma);
1904 
1905 		if (flags & EXEC_OBJECT_CAPTURE) {
1906 			struct i915_capture_list *capture;
1907 
1908 			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
1909 			if (capture) {
1910 				capture->next = eb->request->capture_list;
1911 				capture->vma = vma;
1912 				eb->request->capture_list = capture;
1913 			}
1914 		}
1915 
1916 		/*
1917 		 * If the GPU is not _reading_ through the CPU cache, we need
1918 		 * to make sure that any writes (both previous GPU writes from
1919 		 * before a change in snooping levels and normal CPU writes)
1920 		 * caught in that cache are flushed to main memory.
1921 		 *
1922 		 * We want to say
1923 		 *   obj->cache_dirty &&
1924 		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
1925 		 * but gcc's optimiser doesn't handle that as well and emits
1926 		 * two jumps instead of one. Maybe one day...
1927 		 */
1928 		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
1929 			if (i915_gem_clflush_object(obj, 0))
1930 				flags &= ~EXEC_OBJECT_ASYNC;
1931 		}
1932 
1933 		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
1934 			err = i915_request_await_object
1935 				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
1936 		}
1937 
1938 		if (err == 0)
1939 			err = i915_vma_move_to_active(vma, eb->request, flags);
1940 
1941 		i915_vma_unlock(vma);
1942 
1943 		__eb_unreserve_vma(vma, flags);
1944 		vma->exec_flags = NULL;
1945 
1946 		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
1947 			i915_vma_put(vma);
1948 	}
1949 	ww_acquire_fini(&acquire);
1950 
1951 	if (unlikely(err))
1952 		goto err_skip;
1953 
1954 	eb->exec = NULL;
1955 
1956 	/* Unconditionally flush any chipset caches (for streaming writes). */
1957 	i915_gem_chipset_flush(eb->i915);
1958 	return 0;
1959 
1960 err_skip:
1961 	i915_request_skip(eb->request, err);
1962 	return err;
1963 }
1964 
1965 static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1966 {
1967 	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
1968 		return false;
1969 
1970 	/* Kernel clipping was a DRI1 misfeature */
1971 	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
1972 		if (exec->num_cliprects || exec->cliprects_ptr)
1973 			return false;
1974 	}
1975 
1976 	if (exec->DR4 == 0xffffffff) {
1977 		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1978 		exec->DR4 = 0;
1979 	}
1980 	if (exec->DR1 || exec->DR4)
1981 		return false;
1982 
1983 	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1984 		return false;
1985 
1986 	return true;
1987 }
1988 
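/*
 * I915_EXEC_GEN7_SOL_RESET: zero the four GEN7_SO_WRITE_OFFSET registers
 * with a single MI_LOAD_REGISTER_IMM so that the streamed-output (transform
 * feedback) write offsets restart from 0 for this batch. Only supported on
 * the gen7 render engine.
 */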
1989 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
1990 {
1991 	u32 *cs;
1992 	int i;
1993 
1994 	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
1995 		DRM_DEBUG("sol reset is gen7/rcs only\n");
1996 		return -EINVAL;
1997 	}
1998 
1999 	cs = intel_ring_begin(rq, 4 * 2 + 2);
2000 	if (IS_ERR(cs))
2001 		return PTR_ERR(cs);
2002 
2003 	*cs++ = MI_LOAD_REGISTER_IMM(4);
2004 	for (i = 0; i < 4; i++) {
2005 		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
2006 		*cs++ = 0;
2007 	}
2008 	*cs++ = MI_NOOP;
2009 	intel_ring_advance(rq, cs);
2010 
2011 	return 0;
2012 }
2013 
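/*
 * Run the user batch through the command parser: the batch is copied into
 * a shadow buffer taken from the engine's batch pool while it is scanned.
 * On success the shadow is pinned into the GGTT and appended to the eb's
 * vma array (with HAS_PIN | HAS_REF) so it is released along with the
 * other objects; -EACCES (an unhandled chained batch) makes the caller
 * fall back to executing the original, unparsed batch.
 */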
2014 static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
2015 {
2016 	struct drm_i915_gem_object *shadow_batch_obj;
2017 	struct i915_vma *vma;
2018 	int err;
2019 
2020 	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
2021 						   PAGE_ALIGN(eb->batch_len));
2022 	if (IS_ERR(shadow_batch_obj))
2023 		return ERR_CAST(shadow_batch_obj);
2024 
2025 	err = intel_engine_cmd_parser(eb->engine,
2026 				      eb->batch->obj,
2027 				      shadow_batch_obj,
2028 				      eb->batch_start_offset,
2029 				      eb->batch_len,
2030 				      is_master);
2031 	if (err) {
2032 		if (err == -EACCES) /* unhandled chained batch */
2033 			vma = NULL;
2034 		else
2035 			vma = ERR_PTR(err);
2036 		goto out;
2037 	}
2038 
2039 	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
2040 	if (IS_ERR(vma))
2041 		goto out;
2042 
2043 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
2044 	eb->flags[eb->buffer_count] =
2045 		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
2046 	vma->exec_flags = &eb->flags[eb->buffer_count];
2047 	eb->buffer_count++;
2048 
2049 out:
2050 	i915_gem_object_unpin_pages(shadow_batch_obj);
2051 	return vma;
2052 }
2053 
2054 static void
2055 add_to_client(struct i915_request *rq, struct drm_file *file)
2056 {
2057 	rq->file_priv = file->driver_priv;
2058 	list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
2059 }
2060 
2061 static int eb_submit(struct i915_execbuffer *eb)
2062 {
2063 	int err;
2064 
2065 	err = eb_move_to_gpu(eb);
2066 	if (err)
2067 		return err;
2068 
2069 	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
2070 		err = i915_reset_gen7_sol_offsets(eb->request);
2071 		if (err)
2072 			return err;
2073 	}
2074 
2075 	/*
2076 	 * Once we have completed waiting for other engines (using HW
2077 	 * semaphores), we can signal that this request/batch is ready to run. This
2078 	 * allows us to determine if the batch is still waiting on the GPU
2079 	 * or actually running by checking the breadcrumb.
2080 	 */
2081 	if (eb->engine->emit_init_breadcrumb) {
2082 		err = eb->engine->emit_init_breadcrumb(eb->request);
2083 		if (err)
2084 			return err;
2085 	}
2086 
2087 	err = eb->engine->emit_bb_start(eb->request,
2088 					eb->batch->node.start +
2089 					eb->batch_start_offset,
2090 					eb->batch_len,
2091 					eb->batch_flags);
2092 	if (err)
2093 		return err;
2094 
2095 	return 0;
2096 }
2097 
2098 /*
2099  * Find one BSD ring to dispatch the corresponding BSD command.
2100  * The engine index is returned.
2101  */
2102 static unsigned int
2103 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
2104 			 struct drm_file *file)
2105 {
2106 	struct drm_i915_file_private *file_priv = file->driver_priv;
2107 
2108 	/* Check whether the file_priv has already selected one ring. */
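	/*
	 * atomic_fetch_xor() returns the current global index (0 or 1) and
	 * flips it, so first-time clients are handed VCS0/VCS1 alternately;
	 * once chosen, the engine is sticky for the lifetime of this file.
	 */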
2109 	if ((int)file_priv->bsd_engine < 0)
2110 		file_priv->bsd_engine = atomic_fetch_xor(1,
2111 			 &dev_priv->mm.bsd_engine_dispatch_index);
2112 
2113 	return file_priv->bsd_engine;
2114 }
2115 
2116 static const enum intel_engine_id user_ring_map[] = {
2117 	[I915_EXEC_DEFAULT]	= RCS0,
2118 	[I915_EXEC_RENDER]	= RCS0,
2119 	[I915_EXEC_BLT]		= BCS0,
2120 	[I915_EXEC_BSD]		= VCS0,
2121 	[I915_EXEC_VEBOX]	= VECS0
2122 };
2123 
2124 static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
2125 {
2126 	int err;
2127 
2128 	/*
2129 	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
2130 	 * EIO if the GPU is already wedged.
2131 	 */
2132 	err = i915_terminally_wedged(eb->i915);
2133 	if (err)
2134 		return err;
2135 
2136 	/*
2137 	 * Pinning the contexts may generate requests in order to acquire
2138 	 * GGTT space, so do this first before we reserve a seqno for
2139 	 * ourselves.
2140 	 */
2141 	err = intel_context_pin(ce);
2142 	if (err)
2143 		return err;
2144 
2145 	eb->engine = ce->engine;
2146 	eb->context = ce;
2147 	return 0;
2148 }
2149 
2150 static void eb_unpin_context(struct i915_execbuffer *eb)
2151 {
2152 	intel_context_unpin(eb->context);
2153 }
2154 
2155 static unsigned int
2156 eb_select_legacy_ring(struct i915_execbuffer *eb,
2157 		      struct drm_file *file,
2158 		      struct drm_i915_gem_execbuffer2 *args)
2159 {
2160 	struct drm_i915_private *i915 = eb->i915;
2161 	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
2162 
2163 	if (user_ring_id != I915_EXEC_BSD &&
2164 	    (args->flags & I915_EXEC_BSD_MASK)) {
2165 		DRM_DEBUG("execbuf with non bsd ring but with invalid "
2166 			  "bsd dispatch flags: %d\n", (int)(args->flags));
2167 		return -1;
2168 	}
2169 
2170 	if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
2171 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
2172 
2173 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
2174 			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
2175 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
2176 			   bsd_idx <= I915_EXEC_BSD_RING2) {
2177 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
2178 			bsd_idx--;
2179 		} else {
2180 			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
2181 				  bsd_idx);
2182 			return -1;
2183 		}
2184 
2185 		return _VCS(bsd_idx);
2186 	}
2187 
2188 	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
2189 		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
2190 		return -1;
2191 	}
2192 
2193 	return user_ring_map[user_ring_id];
2194 }
2195 
2196 static int
2197 eb_select_engine(struct i915_execbuffer *eb,
2198 		 struct drm_file *file,
2199 		 struct drm_i915_gem_execbuffer2 *args)
2200 {
2201 	struct intel_context *ce;
2202 	unsigned int idx;
2203 	int err;
2204 
2205 	if (i915_gem_context_user_engines(eb->gem_context))
2206 		idx = args->flags & I915_EXEC_RING_MASK;
2207 	else
2208 		idx = eb_select_legacy_ring(eb, file, args);
2209 
2210 	ce = i915_gem_context_get_engine(eb->gem_context, idx);
2211 	if (IS_ERR(ce))
2212 		return PTR_ERR(ce);
2213 
2214 	err = eb_pin_context(eb, ce);
2215 	intel_context_put(ce);
2216 
2217 	return err;
2218 }
2219 
2220 static void
2221 __free_fence_array(struct drm_syncobj **fences, unsigned int n)
2222 {
2223 	while (n--)
2224 		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
2225 	kvfree(fences);
2226 }
2227 
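/*
 * The syncobj fence array is passed through the legacy cliprects fields
 * (cliprects_ptr/num_cliprects), which is why i915_gem_check_execbuffer()
 * only rejects cliprects when I915_EXEC_FENCE_ARRAY is unset. Each syncobj
 * is looked up and its WAIT/SIGNAL flags are packed into the low bits of
 * the pointer for await_fence_array()/signal_fence_array() to unpack.
 */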
2228 static struct drm_syncobj **
2229 get_fence_array(struct drm_i915_gem_execbuffer2 *args,
2230 		struct drm_file *file)
2231 {
2232 	const unsigned long nfences = args->num_cliprects;
2233 	struct drm_i915_gem_exec_fence __user *user;
2234 	struct drm_syncobj **fences;
2235 	unsigned long n;
2236 	int err;
2237 
2238 	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
2239 		return NULL;
2240 
2241 	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
2242 	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
2243 	if (nfences > min_t(unsigned long,
2244 			    ULONG_MAX / sizeof(*user),
2245 			    SIZE_MAX / sizeof(*fences)))
2246 		return ERR_PTR(-EINVAL);
2247 
2248 	user = u64_to_user_ptr(args->cliprects_ptr);
2249 	if (!access_ok(user, nfences * sizeof(*user)))
2250 		return ERR_PTR(-EFAULT);
2251 
2252 	fences = kvmalloc_array(nfences, sizeof(*fences),
2253 				__GFP_NOWARN | GFP_KERNEL);
2254 	if (!fences)
2255 		return ERR_PTR(-ENOMEM);
2256 
2257 	for (n = 0; n < nfences; n++) {
2258 		struct drm_i915_gem_exec_fence fence;
2259 		struct drm_syncobj *syncobj;
2260 
2261 		if (__copy_from_user(&fence, user++, sizeof(fence))) {
2262 			err = -EFAULT;
2263 			goto err;
2264 		}
2265 
2266 		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
2267 			err = -EINVAL;
2268 			goto err;
2269 		}
2270 
2271 		syncobj = drm_syncobj_find(file, fence.handle);
2272 		if (!syncobj) {
2273 			DRM_DEBUG("Invalid syncobj handle provided\n");
2274 			err = -ENOENT;
2275 			goto err;
2276 		}
2277 
2278 		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
2279 			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
2280 
2281 		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
2282 	}
2283 
2284 	return fences;
2285 
2286 err:
2287 	__free_fence_array(fences, n);
2288 	return ERR_PTR(err);
2289 }
2290 
2291 static void
2292 put_fence_array(struct drm_i915_gem_execbuffer2 *args,
2293 		struct drm_syncobj **fences)
2294 {
2295 	if (fences)
2296 		__free_fence_array(fences, args->num_cliprects);
2297 }
2298 
2299 static int
2300 await_fence_array(struct i915_execbuffer *eb,
2301 		  struct drm_syncobj **fences)
2302 {
2303 	const unsigned int nfences = eb->args->num_cliprects;
2304 	unsigned int n;
2305 	int err;
2306 
2307 	for (n = 0; n < nfences; n++) {
2308 		struct drm_syncobj *syncobj;
2309 		struct dma_fence *fence;
2310 		unsigned int flags;
2311 
2312 		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
2313 		if (!(flags & I915_EXEC_FENCE_WAIT))
2314 			continue;
2315 
2316 		fence = drm_syncobj_fence_get(syncobj);
2317 		if (!fence)
2318 			return -EINVAL;
2319 
2320 		err = i915_request_await_dma_fence(eb->request, fence);
2321 		dma_fence_put(fence);
2322 		if (err < 0)
2323 			return err;
2324 	}
2325 
2326 	return 0;
2327 }
2328 
2329 static void
2330 signal_fence_array(struct i915_execbuffer *eb,
2331 		   struct drm_syncobj **fences)
2332 {
2333 	const unsigned int nfences = eb->args->num_cliprects;
2334 	struct dma_fence * const fence = &eb->request->fence;
2335 	unsigned int n;
2336 
2337 	for (n = 0; n < nfences; n++) {
2338 		struct drm_syncobj *syncobj;
2339 		unsigned int flags;
2340 
2341 		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
2342 		if (!(flags & I915_EXEC_FENCE_SIGNAL))
2343 			continue;
2344 
2345 		drm_syncobj_replace_fence(syncobj, fence);
2346 	}
2347 }
2348 
2349 static int
2350 i915_gem_do_execbuffer(struct drm_device *dev,
2351 		       struct drm_file *file,
2352 		       struct drm_i915_gem_execbuffer2 *args,
2353 		       struct drm_i915_gem_exec_object2 *exec,
2354 		       struct drm_syncobj **fences)
2355 {
2356 	struct i915_execbuffer eb;
2357 	struct dma_fence *in_fence = NULL;
2358 	struct dma_fence *exec_fence = NULL;
2359 	struct sync_file *out_fence = NULL;
2360 	int out_fence_fd = -1;
2361 	int err;
2362 
2363 	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
2364 	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
2365 		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);
2366 
2367 	eb.i915 = to_i915(dev);
2368 	eb.file = file;
2369 	eb.args = args;
2370 	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
2371 		args->flags |= __EXEC_HAS_RELOC;
2372 
2373 	eb.exec = exec;
2374 	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
2375 	eb.vma[0] = NULL;
2376 	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
2377 
2378 	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
2379 	reloc_cache_init(&eb.reloc_cache, eb.i915);
2380 
2381 	eb.buffer_count = args->buffer_count;
2382 	eb.batch_start_offset = args->batch_start_offset;
2383 	eb.batch_len = args->batch_len;
2384 
2385 	eb.batch_flags = 0;
2386 	if (args->flags & I915_EXEC_SECURE) {
2387 		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
2388 			return -EPERM;
2389 
2390 		eb.batch_flags |= I915_DISPATCH_SECURE;
2391 	}
2392 	if (args->flags & I915_EXEC_IS_PINNED)
2393 		eb.batch_flags |= I915_DISPATCH_PINNED;
2394 
2395 	if (args->flags & I915_EXEC_FENCE_IN) {
2396 		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
2397 		if (!in_fence)
2398 			return -EINVAL;
2399 	}
2400 
2401 	if (args->flags & I915_EXEC_FENCE_SUBMIT) {
2402 		if (in_fence) {
2403 			err = -EINVAL;
2404 			goto err_in_fence;
2405 		}
2406 
2407 		exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
2408 		if (!exec_fence) {
2409 			err = -EINVAL;
2410 			goto err_in_fence;
2411 		}
2412 	}
2413 
2414 	if (args->flags & I915_EXEC_FENCE_OUT) {
2415 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
2416 		if (out_fence_fd < 0) {
2417 			err = out_fence_fd;
2418 			goto err_exec_fence;
2419 		}
2420 	}
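	/*
	 * Rough userspace sketch of the fence plumbing above (illustrative
	 * only; assumes the usual libdrm drmIoctl() wrapper and an already
	 * populated struct drm_i915_gem_execbuffer2 "eb2"):
	 *
	 *	eb2.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
	 *	eb2.rsvd2 = in_fence_fd;
	 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &eb2) == 0)
	 *		out_fence_fd = (int)(eb2.rsvd2 >> 32);
	 *
	 * The _WR ioctl variant is required so that the fd we pack into
	 * rsvd2 below is copied back to userspace.
	 */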
2421 
2422 	err = eb_create(&eb);
2423 	if (err)
2424 		goto err_out_fence;
2425 
2426 	GEM_BUG_ON(!eb.lut_size);
2427 
2428 	err = eb_select_context(&eb);
2429 	if (unlikely(err))
2430 		goto err_destroy;
2431 
2432 	/*
2433 	 * Take a local wakeref for preparing to dispatch the execbuf as
2434 	 * we expect to access the hardware fairly frequently in the
2435 	 * process. Upon first dispatch, we acquire another prolonged
2436 	 * wakeref that we hold until the GPU has been idle for at least
2437 	 * 100ms.
2438 	 */
2439 	intel_gt_pm_get(eb.i915);
2440 
2441 	err = i915_mutex_lock_interruptible(dev);
2442 	if (err)
2443 		goto err_rpm;
2444 
2445 	err = eb_select_engine(&eb, file, args);
2446 	if (unlikely(err))
2447 		goto err_unlock;
2448 
2449 	err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
2450 	if (unlikely(err))
2451 		goto err_engine;
2452 
2453 	err = eb_relocate(&eb);
2454 	if (err) {
2455 		/*
2456 		 * If the user expects the execobject.offset and
2457 		 * reloc.presumed_offset to be an exact match,
2458 		 * as for using NO_RELOC, then we cannot update
2459 		 * the execobject.offset until we have completed
2460 		 * relocation.
2461 		 */
2462 		args->flags &= ~__EXEC_HAS_RELOC;
2463 		goto err_vma;
2464 	}
2465 
2466 	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
2467 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
2468 		err = -EINVAL;
2469 		goto err_vma;
2470 	}
2471 	if (eb.batch_start_offset > eb.batch->size ||
2472 	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
2473 		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
2474 		err = -EINVAL;
2475 		goto err_vma;
2476 	}
2477 
2478 	if (eb_use_cmdparser(&eb)) {
2479 		struct i915_vma *vma;
2480 
2481 		vma = eb_parse(&eb, drm_is_current_master(file));
2482 		if (IS_ERR(vma)) {
2483 			err = PTR_ERR(vma);
2484 			goto err_vma;
2485 		}
2486 
2487 		if (vma) {
2488 			/*
2489 			 * Batch parsed and accepted:
2490 			 *
2491 			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
2492 			 * bit from MI_BATCH_BUFFER_START commands issued in
2493 			 * the dispatch_execbuffer implementations. We
2494 			 * specifically don't want that set on batches the
2495 			 * command parser has accepted.
2496 			 */
2497 			eb.batch_flags |= I915_DISPATCH_SECURE;
2498 			eb.batch_start_offset = 0;
2499 			eb.batch = vma;
2500 		}
2501 	}
2502 
2503 	if (eb.batch_len == 0)
2504 		eb.batch_len = eb.batch->size - eb.batch_start_offset;
2505 
2506 	/*
2507 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
2508 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
2509 	 * hsw should have this fixed, but bdw mucks it up again. */
2510 	if (eb.batch_flags & I915_DISPATCH_SECURE) {
2511 		struct i915_vma *vma;
2512 
2513 		/*
2514 		 * At first glance it looks freaky that we pin the batch here
2515 		 * outside of the reservation loop. But:
2516 		 * - The batch is already pinned into the relevant ppgtt, so we
2517 		 *   already have the backing storage fully allocated.
2518 		 * - No other BO uses the global gtt (well contexts, but meh),
2519 		 *   so we don't really have issues with multiple objects not
2520 		 *   fitting due to fragmentation.
2521 		 * So this is actually safe.
2522 		 */
2523 		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
2524 		if (IS_ERR(vma)) {
2525 			err = PTR_ERR(vma);
2526 			goto err_vma;
2527 		}
2528 
2529 		eb.batch = vma;
2530 	}
2531 
2532 	/* All GPU relocation batches must be submitted prior to the user rq */
2533 	GEM_BUG_ON(eb.reloc_cache.rq);
2534 
2535 	/* Allocate a request for this batch buffer nice and early. */
2536 	eb.request = i915_request_create(eb.context);
2537 	if (IS_ERR(eb.request)) {
2538 		err = PTR_ERR(eb.request);
2539 		goto err_batch_unpin;
2540 	}
2541 
2542 	if (in_fence) {
2543 		err = i915_request_await_dma_fence(eb.request, in_fence);
2544 		if (err < 0)
2545 			goto err_request;
2546 	}
2547 
2548 	if (exec_fence) {
2549 		err = i915_request_await_execution(eb.request, exec_fence,
2550 						   eb.engine->bond_execute);
2551 		if (err < 0)
2552 			goto err_request;
2553 	}
2554 
2555 	if (fences) {
2556 		err = await_fence_array(&eb, fences);
2557 		if (err)
2558 			goto err_request;
2559 	}
2560 
2561 	if (out_fence_fd != -1) {
2562 		out_fence = sync_file_create(&eb.request->fence);
2563 		if (!out_fence) {
2564 			err = -ENOMEM;
2565 			goto err_request;
2566 		}
2567 	}
2568 
2569 	/*
2570 	 * Whilst this request exists, batch_obj will be on the
2571 	 * active_list, and so will hold the active reference. Only when this
2572 	 * request is retired will the batch_obj be moved onto the
2573 	 * inactive_list and lose its active reference. Hence we do not need
2574 	 * to explicitly hold another reference here.
2575 	 */
2576 	eb.request->batch = eb.batch;
2577 
2578 	trace_i915_request_queue(eb.request, eb.batch_flags);
2579 	err = eb_submit(&eb);
2580 err_request:
2581 	add_to_client(eb.request, file);
2582 	i915_request_add(eb.request);
2583 
2584 	if (fences)
2585 		signal_fence_array(&eb, fences);
2586 
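	/*
	 * rsvd2 carries both fence fds: the lower 32 bits are the in-fence
	 * supplied by userspace, the upper 32 bits return the out-fence fd
	 * created below.
	 */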
2587 	if (out_fence) {
2588 		if (err == 0) {
2589 			fd_install(out_fence_fd, out_fence->file);
2590 			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
2591 			args->rsvd2 |= (u64)out_fence_fd << 32;
2592 			out_fence_fd = -1;
2593 		} else {
2594 			fput(out_fence->file);
2595 		}
2596 	}
2597 
2598 err_batch_unpin:
2599 	if (eb.batch_flags & I915_DISPATCH_SECURE)
2600 		i915_vma_unpin(eb.batch);
2601 err_vma:
2602 	if (eb.exec)
2603 		eb_release_vmas(&eb);
2604 err_engine:
2605 	eb_unpin_context(&eb);
2606 err_unlock:
2607 	mutex_unlock(&dev->struct_mutex);
2608 err_rpm:
2609 	intel_gt_pm_put(eb.i915);
2610 	i915_gem_context_put(eb.gem_context);
2611 err_destroy:
2612 	eb_destroy(&eb);
2613 err_out_fence:
2614 	if (out_fence_fd != -1)
2615 		put_unused_fd(out_fence_fd);
2616 err_exec_fence:
2617 	dma_fence_put(exec_fence);
2618 err_in_fence:
2619 	dma_fence_put(in_fence);
2620 	return err;
2621 }
2622 
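/*
 * Each buffer slot is one contiguous element combining the userspace exec
 * object with the kernel-side vma pointer and flags word;
 * i915_gem_do_execbuffer() carves eb.vma[] and eb.flags[] out of the tail
 * of the single allocation made by the ioctls below.
 */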
2623 static size_t eb_element_size(void)
2624 {
2625 	return (sizeof(struct drm_i915_gem_exec_object2) +
2626 		sizeof(struct i915_vma *) +
2627 		sizeof(unsigned int));
2628 }
2629 
2630 static bool check_buffer_count(size_t count)
2631 {
2632 	const size_t sz = eb_element_size();
2633 
2634 	/*
2635 	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
2636 	 * array size (see eb_create()). Otherwise, we can accept an array as
2637 	 * large as can be addressed (though use large arrays at your peril)!
2638 	 */
2639 
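	/*
	 * The "- 1" keeps room for the extra element the ioctls allocate
	 * (count + 1) for the command parser's shadow batch.
	 */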
2640 	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
2641 }
2642 
2643 /*
2644  * Legacy execbuffer just creates an exec2 list from the original exec object
2645  * list array and passes it to the real function.
2646  */
2647 int
2648 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
2649 			  struct drm_file *file)
2650 {
2651 	struct drm_i915_gem_execbuffer *args = data;
2652 	struct drm_i915_gem_execbuffer2 exec2;
2653 	struct drm_i915_gem_exec_object *exec_list = NULL;
2654 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
2655 	const size_t count = args->buffer_count;
2656 	unsigned int i;
2657 	int err;
2658 
2659 	if (!check_buffer_count(count)) {
2660 		DRM_DEBUG("execbuf with %zd buffers\n", count);
2661 		return -EINVAL;
2662 	}
2663 
2664 	exec2.buffers_ptr = args->buffers_ptr;
2665 	exec2.buffer_count = args->buffer_count;
2666 	exec2.batch_start_offset = args->batch_start_offset;
2667 	exec2.batch_len = args->batch_len;
2668 	exec2.DR1 = args->DR1;
2669 	exec2.DR4 = args->DR4;
2670 	exec2.num_cliprects = args->num_cliprects;
2671 	exec2.cliprects_ptr = args->cliprects_ptr;
2672 	exec2.flags = I915_EXEC_RENDER;
2673 	i915_execbuffer2_set_context_id(exec2, 0);
2674 
2675 	if (!i915_gem_check_execbuffer(&exec2))
2676 		return -EINVAL;
2677 
2678 	/* Copy in the exec list from userland */
2679 	exec_list = kvmalloc_array(count, sizeof(*exec_list),
2680 				   __GFP_NOWARN | GFP_KERNEL);
2681 	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
2682 				    __GFP_NOWARN | GFP_KERNEL);
2683 	if (exec_list == NULL || exec2_list == NULL) {
2684 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
2685 			  args->buffer_count);
2686 		kvfree(exec_list);
2687 		kvfree(exec2_list);
2688 		return -ENOMEM;
2689 	}
2690 	err = copy_from_user(exec_list,
2691 			     u64_to_user_ptr(args->buffers_ptr),
2692 			     sizeof(*exec_list) * count);
2693 	if (err) {
2694 		DRM_DEBUG("copy %d exec entries failed %d\n",
2695 			  args->buffer_count, err);
2696 		kvfree(exec_list);
2697 		kvfree(exec2_list);
2698 		return -EFAULT;
2699 	}
2700 
2701 	for (i = 0; i < args->buffer_count; i++) {
2702 		exec2_list[i].handle = exec_list[i].handle;
2703 		exec2_list[i].relocation_count = exec_list[i].relocation_count;
2704 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
2705 		exec2_list[i].alignment = exec_list[i].alignment;
2706 		exec2_list[i].offset = exec_list[i].offset;
2707 		if (INTEL_GEN(to_i915(dev)) < 4)
2708 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
2709 		else
2710 			exec2_list[i].flags = 0;
2711 	}
2712 
2713 	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
2714 	if (exec2.flags & __EXEC_HAS_RELOC) {
2715 		struct drm_i915_gem_exec_object __user *user_exec_list =
2716 			u64_to_user_ptr(args->buffers_ptr);
2717 
2718 		/* Copy the new buffer offsets back to the user's exec list. */
2719 		for (i = 0; i < args->buffer_count; i++) {
2720 			if (!(exec2_list[i].offset & UPDATE))
2721 				continue;
2722 
2723 			exec2_list[i].offset =
2724 				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
2725 			exec2_list[i].offset &= PIN_OFFSET_MASK;
2726 			if (__copy_to_user(&user_exec_list[i].offset,
2727 					   &exec2_list[i].offset,
2728 					   sizeof(user_exec_list[i].offset)))
2729 				break;
2730 		}
2731 	}
2732 
2733 	kvfree(exec_list);
2734 	kvfree(exec2_list);
2735 	return err;
2736 }
2737 
2738 int
2739 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
2740 			   struct drm_file *file)
2741 {
2742 	struct drm_i915_gem_execbuffer2 *args = data;
2743 	struct drm_i915_gem_exec_object2 *exec2_list;
2744 	struct drm_syncobj **fences = NULL;
2745 	const size_t count = args->buffer_count;
2746 	int err;
2747 
2748 	if (!check_buffer_count(count)) {
2749 		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
2750 		return -EINVAL;
2751 	}
2752 
2753 	if (!i915_gem_check_execbuffer(args))
2754 		return -EINVAL;
2755 
2756 	/* Allocate an extra slot for use by the command parser */
2757 	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
2758 				    __GFP_NOWARN | GFP_KERNEL);
2759 	if (exec2_list == NULL) {
2760 		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
2761 			  count);
2762 		return -ENOMEM;
2763 	}
2764 	if (copy_from_user(exec2_list,
2765 			   u64_to_user_ptr(args->buffers_ptr),
2766 			   sizeof(*exec2_list) * count)) {
2767 		DRM_DEBUG("copy %zd exec entries failed\n", count);
2768 		kvfree(exec2_list);
2769 		return -EFAULT;
2770 	}
2771 
2772 	if (args->flags & I915_EXEC_FENCE_ARRAY) {
2773 		fences = get_fence_array(args, file);
2774 		if (IS_ERR(fences)) {
2775 			kvfree(exec2_list);
2776 			return PTR_ERR(fences);
2777 		}
2778 	}
2779 
2780 	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);
2781 
2782 	/*
2783 	 * Now that we have begun execution of the batchbuffer, we ignore
2784 	 * any new error after this point. Also given that we have already
2785 	 * updated the associated relocations, we try to write out the current
2786 	 * object locations irrespective of any error.
2787 	 */
2788 	if (args->flags & __EXEC_HAS_RELOC) {
2789 		struct drm_i915_gem_exec_object2 __user *user_exec_list =
2790 			u64_to_user_ptr(args->buffers_ptr);
2791 		unsigned int i;
2792 
2793 		/* Copy the new buffer offsets back to the user's exec list. */
2794 		/*
2795 		 * Note: count * sizeof(*user_exec_list) does not overflow,
2796 		 * because we checked 'count' in check_buffer_count().
2797 		 *
2798 		 * And this range already got effectively checked earlier
2799 		 * when we did the "copy_from_user()" above.
2800 		 */
2801 		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
2802 			goto end;
2803 
2804 		for (i = 0; i < args->buffer_count; i++) {
2805 			if (!(exec2_list[i].offset & UPDATE))
2806 				continue;
2807 
2808 			exec2_list[i].offset =
2809 				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
2810 			unsafe_put_user(exec2_list[i].offset,
2811 					&user_exec_list[i].offset,
2812 					end_user);
2813 		}
2814 end_user:
2815 		user_access_end();
2816 end:;
2817 	}
2818 
2819 	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
2820 	put_fence_array(args, fences);
2821 	kvfree(exec2_list);
2822 	return err;
2823 }
2824