/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008,2010 Intel Corporation
 */

#include <linux/intel-iommu.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

struct eb_vma {
	struct i915_vma *vma;
	unsigned int flags;

	/** This vma's place in the execbuf reservation list */
	struct drm_i915_gem_exec_object2 *exec;
	struct list_head bind_link;
	struct list_head reloc_link;

	struct hlist_node node;
	u32 handle;
};

struct eb_vma_array {
	struct kref kref;
	struct eb_vma vma[];
};

#define __EXEC_OBJECT_HAS_PIN		BIT(31)
#define __EXEC_OBJECT_HAS_FENCE		BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28) /* all of the above */

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_INTERNAL_FLAGS	(~0u << 31)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute is to add content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try and keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
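 *
 * For illustration only, the snippet below is a rough userspace-side sketch
 * of such a soft-pinned, NO_RELOC submission (it is not a verbatim example
 * from the uAPI documentation, and bo_handle, bo_offset, batch_bytes and fd
 * stand in for values obtained elsewhere)::
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = bo_handle,
 *		.offset = bo_offset,
 *		.flags = EXEC_OBJECT_PINNED |
 *			 EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);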
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NO_RELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules, any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it
 * to the hardware (well, leaving it in a queue to be executed). However, we
 * also offer the ability for batchbuffers to be run with elevated privileges
 * so that they can access otherwise hidden registers. (Used to adjust L3 cache
 * etc.) Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions, we check that each instruction is from
 * our whitelist and all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct eb_vma *vma;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct intel_context *context; /* logical state for the request */
	struct i915_gem_context *gem_context; /** caller's context */

	struct i915_request *request; /** our request to build */
	struct eb_vma *batch; /** identity of the batch obj/vma */
	struct i915_vma *trampoline; /** trampoline used for chaining */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct i915_vma *target;
		struct i915_request *rq;
		struct i915_vma *rq_vma;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
	struct eb_vma_array *array;
};

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_requires_cmd_parser(eb->engine) ||
		(intel_engine_using_cmd_parser(eb->engine) &&
		 eb->args->batch_len);
}

static struct eb_vma_array *eb_vma_array_create(unsigned int count)
{
	struct eb_vma_array *arr;

	arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN);
	if (!arr)
		return NULL;

	kref_init(&arr->kref);
	arr->vma[0].vma = NULL;

	return arr;
}

static inline void eb_unreserve_vma(struct eb_vma *ev)
{
	struct i915_vma *vma = ev->vma;

	if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	if (ev->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);

	ev->flags &= ~(__EXEC_OBJECT_HAS_PIN |
		       __EXEC_OBJECT_HAS_FENCE);
}

static void eb_vma_array_destroy(struct kref *kref)
{
	struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref);
	struct eb_vma *ev = arr->vma;

	while (ev->vma) {
		eb_unreserve_vma(ev);
		i915_vma_put(ev->vma);
		ev++;
	}

	kvfree(arr);
}

static void eb_vma_array_put(struct eb_vma_array *arr)
{
	kref_put(&arr->kref, eb_vma_array_destroy);
}

static int eb_create(struct i915_execbuffer *eb)
{
	/* Allocate an extra slot for use by the command parser + sentinel */
	eb->array = eb_vma_array_create(eb->buffer_count + 2);
	if (!eb->array)
		return -ENOMEM;

	eb->vma = eb->array->vma;

	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size)) {
			eb_vma_array_put(eb->array);
			return -ENOMEM;
		}

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
			unsigned int exec_flags)
{
	u64 pin_flags = 0;

	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED)
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	return pin_flags;
}

static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct eb_vma *ev)
{
	struct i915_vma *vma = ev->vma;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	/* Attempt to reuse the current location if available */
	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) {
		if (entry->flags & EXEC_OBJECT_PINNED)
			return false;

		/* Failing that pick any _free_ space if suitable */
		if (unlikely(i915_vma_pin(vma,
					  entry->pad_to_size,
					  entry->alignment,
					  eb_pin_flags(entry, ev->flags) |
					  PIN_USER | PIN_NOEVICT)))
			return false;
	}

	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	ev->flags |= __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, ev->flags);
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment &&
		     !is_power_of_2_u64(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}
	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

static void
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int i, unsigned batch_idx,
	   struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	struct eb_vma *ev = &eb->vma[i];

	GEM_BUG_ON(i915_vma_is_closed(vma));

	ev->vma = vma;
	ev->exec = entry;
	ev->flags = entry->flags;

	if (eb->lut_size > 0) {
		ev->handle = entry->handle;
		hlist_add_head(&ev->node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&ev->reloc_link, &eb->relocs);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (i == batch_idx) {
		if (entry->relocation_count &&
		    !(ev->flags & EXEC_OBJECT_PINNED))
			ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			ev->flags |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batch = ev;
	}

	if (eb_pin_vma(eb, entry, ev)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(ev);
		list_add_tail(&ev->bind_link, &eb->unbound);
	}
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct eb_vma *ev,
			  u64 pin_flags)
{
	struct drm_i915_gem_exec_object2 *entry = ev->exec;
	struct i915_vma *vma = ev->vma;
	int err;

	if (drm_mm_node_allocated(&vma->node) &&
	    eb_vma_misplaced(entry, vma, ev->flags)) {
		err = i915_vma_unbind(vma);
		if (err)
			return err;
	}

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   eb_pin_flags(entry, ev->flags) | pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	ev->flags |= __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
	struct list_head last;
	struct eb_vma *ev;
	unsigned int i, pass;
	int err = 0;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2. Bind new objects.
	 * 3. Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
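	 *
	 * In practice the loop below makes up to three binding attempts:
	 * the first with PIN_NONBLOCK, the second after re-sorting the
	 * objects into priority order, and the third after evicting the
	 * entire VM. -EAGAIN only flushes the userptr workqueue and
	 * retries without consuming a pass.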
	 */

	if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
		return -EINTR;

	pass = 0;
	do {
		list_for_each_entry(ev, &eb->unbound, bind_link) {
			err = eb_reserve_vma(eb, ev, pin_flags);
			if (err)
				break;
		}
		if (!(err == -ENOSPC || err == -EAGAIN))
			break;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags;

			ev = &eb->vma[i];
			flags = ev->flags;
			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(ev);

			if (flags & EXEC_OBJECT_PINNED)
				/* Pinned must have their slot */
				list_add(&ev->bind_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				/* Mappable objects require the lowest 256MiB (aperture) */
				list_add_tail(&ev->bind_link, &eb->unbound);
			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
				/* Prioritise 4GiB region for restricted bo */
				list_add(&ev->bind_link, &last);
			else
				list_add_tail(&ev->bind_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		if (err == -EAGAIN) {
			mutex_unlock(&eb->i915->drm.struct_mutex);
			flush_workqueue(eb->i915->mm.userptr_wq);
			mutex_lock(&eb->i915->drm.struct_mutex);
			continue;
		}

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			mutex_lock(&eb->context->vm->mutex);
			err = i915_gem_evict_vm(eb->context->vm);
			mutex_unlock(&eb->context->vm->mutex);
			if (err)
				goto unlock;
			break;

		default:
			err = -ENOSPC;
			goto unlock;
		}

		pin_flags = PIN_USER;
	} while (1);

unlock:
	mutex_unlock(&eb->i915->drm.struct_mutex);
	return err;
}

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->gem_context = ctx;
	if (rcu_access_pointer(ctx->vm))
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	eb->context_flags = 0;
	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int __eb_add_lut(struct i915_execbuffer *eb,
			u32 handle, struct i915_vma *vma)
{
	struct i915_gem_context *ctx = eb->gem_context;
	struct i915_lut_handle *lut;
	int err;

	lut = i915_lut_handle_alloc();
	if (unlikely(!lut))
		return -ENOMEM;

	i915_vma_get(vma);
	if (!atomic_fetch_inc(&vma->open_count))
		i915_vma_reopen(vma);
	lut->handle = handle;
	lut->ctx = ctx;

	/* Check that the context hasn't been closed in the meantime */
	err = -EINTR;
	if (!mutex_lock_interruptible(&ctx->mutex)) {
		err = -ENOENT;
		if (likely(!i915_gem_context_is_closed(ctx)))
			err = radix_tree_insert(&ctx->handles_vma, handle, vma);
		if (err == 0) { /* And nor has this handle */
			struct drm_i915_gem_object *obj = vma->obj;

			i915_gem_object_lock(obj);
			if (idr_find(&eb->file->object_idr, handle) == obj) {
				list_add(&lut->obj_link, &obj->lut_list);
			} else {
				radix_tree_delete(&ctx->handles_vma, handle);
				err = -ENOENT;
			}
			i915_gem_object_unlock(obj);
		}
		mutex_unlock(&ctx->mutex);
	}
	if (unlikely(err))
		goto err;

	return 0;

err:
	i915_vma_close(vma);
	i915_vma_put(vma);
	i915_lut_handle_free(lut);
	return err;
}

static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
{
	do {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		int err;

		rcu_read_lock();
		vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
		if (likely(vma))
			vma = i915_vma_tryget(vma);
		rcu_read_unlock();
		if (likely(vma))
			return vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj))
			return ERR_PTR(-ENOENT);

		vma = i915_vma_instance(obj, eb->context->vm, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			return vma;
		}

		err = __eb_add_lut(eb, handle, vma);
		if (likely(!err))
			return vma;

		i915_gem_object_put(obj);
		if (err != -EEXIST)
			return ERR_PTR(err);
	} while (1);
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	unsigned int batch = eb_batch_index(eb);
	unsigned int i;
	int err = 0;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	for (i = 0; i < eb->buffer_count; i++) {
		struct i915_vma *vma;

		vma = eb_lookup_vma(eb, eb->exec[i].handle);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = eb_validate_vma(eb, &eb->exec[i], vma);
		if (unlikely(err)) {
			i915_vma_put(vma);
			break;
		}

		eb_add_vma(eb, i, batch, vma);
	}

	eb->vma[i].vma = NULL;
	return err;
}

static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return &eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct eb_vma *ev;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(ev, head, node) {
			if (ev->handle == handle)
				return ev;
		}
		return NULL;
	}
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->array)
		eb_vma_array_put(eb->array);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.flags = 0;
	cache->rq = NULL;
	cache->target = NULL;
}

#define RELOC_TAIL 4

static int reloc_gpu_chain(struct reloc_cache *cache)
{
	struct intel_gt_buffer_pool_node *pool;
	struct i915_request *rq = cache->rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	pool = intel_gt_get_buffer_pool(rq->engine->gt, PAGE_SIZE);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	batch = i915_vma_instance(pool->obj, rq->context->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_pool;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto out_pool;

	GEM_BUG_ON(cache->rq_size + RELOC_TAIL > PAGE_SIZE / sizeof(u32));
	cmd = cache->rq_cmd + cache->rq_size;
	*cmd++ = MI_ARB_CHECK;
	if (cache->gen >= 8)
		*cmd++ = MI_BATCH_BUFFER_START_GEN8;
	else if (cache->gen >= 6)
		*cmd++ = MI_BATCH_BUFFER_START;
	else
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cmd++ = lower_32_bits(batch->node.start);
	*cmd++ = upper_32_bits(batch->node.start); /* Always 0 for gen<8 */
	i915_gem_object_flush_map(cache->rq_vma->obj);
	i915_gem_object_unpin_map(cache->rq_vma->obj);
	cache->rq_vma = NULL;

	err = intel_gt_buffer_pool_mark_active(pool, rq);
	if (err == 0) {
		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
	}
	i915_vma_unpin(batch);
	if (err)
		goto out_pool;

	cmd = i915_gem_object_pin_map(batch->obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_pool;
	}

	/* Return with batch mapping (cmd) still pinned */
	cache->rq_cmd = cmd;
	cache->rq_size = 0;
	cache->rq_vma = batch;

out_pool:
	intel_gt_buffer_pool_put(pool);
	return err;
}

static unsigned int reloc_bb_flags(const struct reloc_cache *cache)
{
	return cache->gen > 5 ? 0 : I915_DISPATCH_SECURE;
}

static int reloc_gpu_flush(struct reloc_cache *cache)
{
	struct i915_request *rq;
	int err;

	rq = fetch_and_zero(&cache->rq);
	if (!rq)
		return 0;

	if (cache->rq_vma) {
		struct drm_i915_gem_object *obj = cache->rq_vma->obj;

		GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
		cache->rq_cmd[cache->rq_size++] = MI_BATCH_BUFFER_END;

		__i915_gem_object_flush_map(obj,
					    0, sizeof(u32) * cache->rq_size);
		i915_gem_object_unpin_map(obj);
	}

	err = 0;
	if (rq->engine->emit_init_breadcrumb)
		err = rq->engine->emit_init_breadcrumb(rq);
	if (!err)
		err = rq->engine->emit_bb_start(rq,
						rq->batch->node.start,
						PAGE_SIZE,
						reloc_bb_flags(cache));
	if (err)
		i915_request_set_error_once(rq, err);

	intel_gt_chipset_flush(rq->engine->gt);
	i915_request_add(rq);

	return err;
}

static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	i915_vma_lock(vma);

	if (obj->cache_dirty & ~obj->cache_coherent)
		i915_gem_clflush_object(obj, 0);
	obj->write_domain = 0;

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	i915_vma_unlock(vma);

	return err;
}

static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct intel_engine_cs *engine,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct intel_gt_buffer_pool_node *pool;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	cmd = i915_gem_object_pin_map(pool->obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_pool;
	}

	batch = i915_vma_instance(pool->obj, eb->context->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	if (engine == eb->context->engine) {
		rq = i915_request_create(eb->context);
	} else {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto err_unpin;
		}

		i915_vm_put(ce->vm);
		ce->vm = i915_vm_get(eb->context->vm);

		rq = intel_context_create_request(ce);
		intel_context_put(ce);
	}
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = intel_gt_buffer_pool_mark_active(pool, rq);
	if (err)
		goto err_request;

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	rq->batch = batch;
	i915_vma_unpin(batch);

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;
	cache->rq_vma = batch;

	/* Return with batch mapping (cmd) still pinned */
	goto out_pool;

skip_request:
	i915_request_set_error_once(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(pool->obj);
out_pool:
	intel_gt_buffer_pool_put(pool);
	return err;
}

static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
{
	return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6);
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;
	int err;

	if (unlikely(!cache->rq)) {
		struct intel_engine_cs *engine = eb->engine;

		if (!reloc_can_use_engine(engine)) {
			engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
			if (!engine)
				return ERR_PTR(-ENODEV);
		}

		err = __reloc_gpu_alloc(eb, engine, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	if (vma != cache->target) {
		err = reloc_move_to_gpu(cache->rq, vma);
		if (unlikely(err)) {
			i915_request_set_error_once(cache->rq, err);
			return ERR_PTR(err);
		}

		cache->target = vma;
	}

	if (unlikely(cache->rq_size + len >
		     PAGE_SIZE / sizeof(u32) - RELOC_TAIL)) {
		err = reloc_gpu_chain(cache);
		if (unlikely(err)) {
			i915_request_set_error_once(cache->rq, err);
			return ERR_PTR(err);
		}
	}

	GEM_BUG_ON(cache->rq_size + len >= PAGE_SIZE / sizeof(u32));
	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
{
	struct page *page;
	unsigned long addr;

	GEM_BUG_ON(vma->pages != vma->obj->mm.pages);

	page = i915_gem_object_get_page(vma->obj, offset >> PAGE_SHIFT);
	addr = PFN_PHYS(page_to_pfn(page));
	GEM_BUG_ON(overflows_type(addr, u32)); /* expected dma32 */

	return addr + offset_in_page(offset);
}

static int
__reloc_entry_gpu(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  u64 offset,
		  u64 target_addr)
{
	const unsigned int gen = eb->reloc_cache.gen;
	unsigned int len;
	u32 *batch;
	u64 addr;

	if (gen >= 8)
		len = offset & 7 ? 8 : 5;
	else if (gen >= 4)
		len = 4;
	else
		len = 3;

	batch = reloc_gpu(eb, vma, len);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	addr = gen8_canonical_addr(vma->node.start + offset);
	if (gen >= 8) {
		if (offset & 7) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = lower_32_bits(addr);
			*batch++ = upper_32_bits(addr);
			*batch++ = lower_32_bits(target_addr);

			addr = gen8_canonical_addr(addr + 4);

			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = lower_32_bits(addr);
			*batch++ = upper_32_bits(addr);
			*batch++ = upper_32_bits(target_addr);
		} else {
			*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
			*batch++ = lower_32_bits(addr);
			*batch++ = upper_32_bits(addr);
			*batch++ = lower_32_bits(target_addr);
			*batch++ = upper_32_bits(target_addr);
		}
	} else if (gen >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = addr;
		*batch++ = target_addr;
	} else if (IS_I965G(eb->i915)) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = vma_phys_addr(vma, offset);
		*batch++ = target_addr;
	} else if (gen >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = addr;
		*batch++ = target_addr;
	} else if (gen >= 3 &&
		   !(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = addr;
		*batch++ = target_addr;
	} else {
		*batch++ = MI_STORE_DWORD_IMM;
		*batch++ = vma_phys_addr(vma, offset);
		*batch++ = target_addr;
	}

	return 0;
}

static u64
relocate_entry(struct i915_execbuffer *eb,
	       struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       const struct i915_vma *target)
{
	u64 target_addr = relocation_target(reloc, target);
	int err;

	err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
	if (err)
		return err;

	return target->node.start | UPDATE;
}

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct eb_vma *ev,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_i915_private *i915 = eb->i915;
	struct eb_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		drm_dbg(&i915->drm, "reloc with multiple write domains: "
			"target %d offset %d "
			"read %08x write %08x",
			reloc->target_handle,
			(int) reloc->offset,
			reloc->read_domains,
			reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
			"target %d offset %d "
			"read %08x write %08x",
			reloc->target_handle,
			(int) reloc->offset,
			reloc->read_domains,
			reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		target->flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN(eb->i915, 6)) {
			err = i915_vma_bind(target->vma,
					    target->vma->obj->cache_level,
					    PIN_GLOBAL, NULL);
			if (err)
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		drm_dbg(&i915->drm, "Relocation beyond object bounds: "
			"target %d offset %d size %d.\n",
			reloc->target_handle,
			(int)reloc->offset,
			(int)ev->vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
			"target %d offset %d.\n",
			reloc->target_handle,
			(int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	ev->flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(eb, ev->vma, reloc, target->vma);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
	struct drm_i915_gem_relocation_entry __user *urelocs =
		u64_to_user_ptr(entry->relocs_ptr);
	unsigned long remain = entry->relocation_count;

	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned long, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. For in such a case
		 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
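		 *
		 * If the copy below leaves any bytes uncopied, we simply
		 * bail out and report -EFAULT to userspace.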
		 */
		copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
		if (unlikely(copied))
			return -EFAULT;

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, ev, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				return (int)offset;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset and will not
				 * change the execobject.offset, on the
				 * next call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presuming a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				__put_user(offset,
					   &urelocs[r - stack].presumed_offset);
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);

	return 0;
}

static int eb_relocate(struct i915_execbuffer *eb)
{
	int err;

	err = eb_lookup_vmas(eb);
	if (err)
		return err;

	if (!list_empty(&eb->unbound)) {
		err = eb_reserve(eb);
		if (err)
			return err;
	}

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct eb_vma *ev;
		int flush;

		list_for_each_entry(ev, &eb->relocs, reloc_link) {
			err = eb_relocate_vma(eb, ev);
			if (err)
				break;
		}

		flush = reloc_gpu_flush(&eb->reloc_cache);
		if (!err)
			err = flush;
	}

	return err;
}

static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct ww_acquire_ctx acquire;
	unsigned int i;
	int err = 0;

	ww_acquire_init(&acquire, &reservation_ww_class);

	for (i = 0; i < count; i++) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
		if (err == -EDEADLK) {
			GEM_BUG_ON(i == 0);
			do {
				int j = i - 1;

				ww_mutex_unlock(&eb->vma[j].vma->resv->lock);

				swap(eb->vma[i], eb->vma[j]);
			} while (--i);

			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
							       &acquire);
		}
		if (err)
			break;
	}
	ww_acquire_done(&acquire);

	while (i--) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;
		unsigned int flags = ev->flags;
		struct drm_i915_gem_object *obj = vma->obj;

		assert_vma_held(vma);

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (capture) {
				capture->next = eb->request->capture_list;
				capture->vma = vma;
				eb->request->capture_list = capture;
			}
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
			err = i915_request_await_object
				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		}

		if (err == 0)
			err = i915_vma_move_to_active(vma, eb->request, flags);

		i915_vma_unlock(vma);
		eb_unreserve_vma(ev);
	}
	ww_acquire_fini(&acquire);

	eb_vma_array_put(fetch_and_zero(&eb->array));

	if (unlikely(err))
		goto err_skip;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	intel_gt_chipset_flush(eb->engine->gt);
	return 0;

err_skip:
	i915_request_set_error_once(eb->request, err);
	return err;
}

static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return -EINVAL;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return -EINVAL;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return -EINVAL;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return -EINVAL;

	return 0;
}

static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
	u32 *cs;
	int i;

	if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
		drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(rq, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static struct i915_vma *
shadow_batch_pin(struct drm_i915_gem_object *obj,
		 struct i915_address_space *vm,
		 unsigned int flags)
{
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		return ERR_PTR(err);

	return vma;
}

struct eb_parse_work {
	struct dma_fence_work base;
	struct intel_engine_cs *engine;
	struct i915_vma *batch;
	struct i915_vma *shadow;
	struct i915_vma *trampoline;
	unsigned int batch_offset;
	unsigned int batch_length;
};

static int __eb_parse(struct dma_fence_work *work)
{
	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);

	return intel_engine_cmd_parser(pw->engine,
				       pw->batch,
				       pw->batch_offset,
				       pw->batch_length,
				       pw->shadow,
				       pw->trampoline);
}

static void __eb_parse_release(struct dma_fence_work *work)
{
	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);

	if (pw->trampoline)
		i915_active_release(&pw->trampoline->active);
	i915_active_release(&pw->shadow->active);
	i915_active_release(&pw->batch->active);
}

static const struct dma_fence_work_ops eb_parse_ops = {
	.name = "eb_parse",
	.work = __eb_parse,
	.release = __eb_parse_release,
};

static inline int
__parser_mark_active(struct i915_vma *vma,
		     struct intel_timeline *tl,
		     struct dma_fence *fence)
{
	struct intel_gt_buffer_pool_node *node = vma->private;

	return i915_active_ref(&node->active, tl, fence);
}

static int
parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
{
	int err;

	mutex_lock(&tl->mutex);

	err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
	if (err)
		goto unlock;

	if (pw->trampoline) {
		err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
		if (err)
			goto unlock;
	}

unlock:
	mutex_unlock(&tl->mutex);
	return err;
}

static int eb_parse_pipeline(struct i915_execbuffer *eb,
			     struct i915_vma *shadow,
			     struct i915_vma *trampoline)
{
	struct eb_parse_work *pw;
	int err;

	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
	if (!pw)
		return -ENOMEM;

	err = i915_active_acquire(&eb->batch->vma->active);
	if (err)
		goto err_free;

	err = i915_active_acquire(&shadow->active);
	if (err)
		goto err_batch;

	if (trampoline) {
		err = i915_active_acquire(&trampoline->active);
		if (err)
			goto err_shadow;
	}

	dma_fence_work_init(&pw->base, &eb_parse_ops);

	pw->engine = eb->engine;
	pw->batch = eb->batch->vma;
	pw->batch_offset = eb->batch_start_offset;
	pw->batch_length = eb->batch_len;
	pw->shadow = shadow;
	pw->trampoline = trampoline;

	/* Mark active refs early for this worker, in case we get interrupted */
	err = parser_mark_active(pw, eb->context->timeline);
	if (err)
		goto err_commit;

	err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
	if (err)
		goto err_commit;

	err = dma_resv_reserve_shared(pw->batch->resv, 1);
	if (err)
		goto err_commit_unlock;

	/* Wait for all writes (and relocs) into the batch to complete */
	err = i915_sw_fence_await_reservation(&pw->base.chain,
					      pw->batch->resv, NULL, false,
					      0, I915_FENCE_GFP);
	if (err < 0)
		goto err_commit_unlock;

	/* Keep the batch alive and unwritten as we parse */
	dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);

	dma_resv_unlock(pw->batch->resv);

	/* Force execution to wait for completion of the parser */
	dma_resv_lock(shadow->resv, NULL);
	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
	dma_resv_unlock(shadow->resv);

	dma_fence_work_commit_imm(&pw->base);
	return 0;

err_commit_unlock:
	dma_resv_unlock(pw->batch->resv);
err_commit:
	i915_sw_fence_set_error_once(&pw->base.chain, err);
	dma_fence_work_commit_imm(&pw->base);
	return err;

err_shadow:
	i915_active_release(&shadow->active);
err_batch:
	i915_active_release(&eb->batch->vma->active);
err_free:
	kfree(pw);
	return err;
}

static int eb_parse(struct i915_execbuffer *eb)
{
	struct drm_i915_private *i915 = eb->i915;
	struct intel_gt_buffer_pool_node *pool;
	struct i915_vma *shadow, *trampoline;
	unsigned int len;
	int err;

	if (!eb_use_cmdparser(eb))
		return 0;

	len = eb->batch_len;
	if (!CMDPARSER_USES_GGTT(eb->i915)) {
		/*
		 * ppGTT backed shadow buffers must be mapped RO, to prevent
		 * post-scan tampering
		 */
		if (!eb->context->vm->has_read_only) {
			drm_dbg(&i915->drm,
				"Cannot prevent post-scan tampering without RO capable vm\n");
			return -EINVAL;
		}
	} else {
		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
	}

	pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
	if (IS_ERR(shadow)) {
		err = PTR_ERR(shadow);
		goto err;
	}
	i915_gem_object_set_readonly(shadow->obj);
	shadow->private = pool;

	trampoline = NULL;
	if (CMDPARSER_USES_GGTT(eb->i915)) {
		trampoline = shadow;

		shadow = shadow_batch_pin(pool->obj,
					  &eb->engine->gt->ggtt->vm,
					  PIN_GLOBAL);
		if (IS_ERR(shadow)) {
			err = PTR_ERR(shadow);
			shadow = trampoline;
			goto err_shadow;
		}
		shadow->private = pool;

		eb->batch_flags |= I915_DISPATCH_SECURE;
	}

	err = eb_parse_pipeline(eb, shadow, trampoline);
	if (err)
		goto err_trampoline;

	eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
	eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
	eb->batch = &eb->vma[eb->buffer_count++];
	eb->vma[eb->buffer_count].vma = NULL;

	eb->trampoline = trampoline;
	eb->batch_start_offset = 0;

	return 0;

err_trampoline:
	if (trampoline)
		i915_vma_unpin(trampoline);
err_shadow:
	i915_vma_unpin(shadow);
err:
	intel_gt_buffer_pool_put(pool);
	return err;
}

static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	rq->file_priv = file_priv;

	spin_lock(&file_priv->mm.lock);
	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);
}

static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	/*
	 * After we completed waiting for other engines (using HW semaphores)
	 * then we can signal that this request/batch is ready to run. This
	 * allows us to determine if the batch is still waiting on the GPU
	 * or actually running by checking the breadcrumb.
	 */
	if (eb->engine->emit_init_breadcrumb) {
		err = eb->engine->emit_init_breadcrumb(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	if (eb->trampoline) {
		GEM_BUG_ON(eb->batch_start_offset);
		err = eb->engine->emit_bb_start(eb->request,
						eb->trampoline->node.start +
						eb->batch_len,
						0, 0);
		if (err)
			return err;
	}

	if (intel_context_nopreempt(eb->context))
		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);

	return 0;
}

static int num_vcs_engines(const struct drm_i915_private *i915)
{
	return hweight64(INTEL_INFO(i915)->engine_mask &
			 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine =
			get_random_int() % num_vcs_engines(dev_priv);

	return file_priv->bsd_engine;
}

static const enum intel_engine_id user_ring_map[] = {
	[I915_EXEC_DEFAULT] = RCS0,
	[I915_EXEC_RENDER] = RCS0,
	[I915_EXEC_BLT] = BCS0,
	[I915_EXEC_BSD] = VCS0,
	[I915_EXEC_VEBOX] = VECS0
};

static struct i915_request *eb_throttle(struct intel_context *ce)
{
	struct intel_ring *ring = ce->ring;
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;

	/*
	 * Completely unscientific finger-in-the-air estimates for suitable
	 * maximum user request size (to avoid blocking) and then backoff.
	 */
	if (intel_ring_update_space(ring) >= PAGE_SIZE)
		return NULL;

	/*
	 * Find a request after which at least half the ring will be
	 * available once we have waited upon it. The hysteresis allows us to
	 * compete for the shared ring and should mean that we sleep less
	 * often prior to claiming our resources, but not so long that the
	 * ring completely drains before we can submit our next request.
	 */
	list_for_each_entry(rq, &tl->requests, link) {
		if (rq->ring != ring)
			continue;

		if (__intel_ring_space(rq->postfix,
				       ring->emit, ring->size) > ring->size / 2)
			break;
	}
	if (&rq->link == &tl->requests)
		return NULL; /* weird, we will check again later for real */

	return i915_request_get(rq);
}
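
/*
 * For illustration only, a worked instance of the throttling heuristic above
 * (the numbers are hypothetical, not taken from any particular platform):
 * with a 16 KiB ring and less than a page of space currently free,
 * eb_throttle() walks the timeline and returns the oldest request on this
 * ring whose completion leaves more than 8 KiB (half the ring) available.
 * Waiting on that single request both unblocks the client and restores
 * enough headroom that the next few submissions need not throttle again.
 */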

static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
	struct intel_timeline *tl;
	struct i915_request *rq;
	int err;

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	err = intel_gt_terminally_wedged(ce->engine->gt);
	if (err)
		return err;

	if (unlikely(intel_context_is_banned(ce)))
		return -EIO;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	err = intel_context_pin(ce);
	if (err)
		return err;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process, and require the engine to be kept awake between accesses.
	 * Upon dispatch, we acquire another prolonged wakeref that we hold
	 * until the timeline is idle, which in turn releases the wakeref
	 * taken on the engine, and the parent device.
	 */
	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl)) {
		err = PTR_ERR(tl);
		goto err_unpin;
	}

	intel_context_enter(ce);
	rq = eb_throttle(ce);

	intel_context_timeline_unlock(tl);

	if (rq) {
		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
		long timeout;

		timeout = MAX_SCHEDULE_TIMEOUT;
		if (nonblock)
			timeout = 0;

		timeout = i915_request_wait(rq,
					    I915_WAIT_INTERRUPTIBLE,
					    timeout);
		i915_request_put(rq);

		if (timeout < 0) {
			err = nonblock ? -EWOULDBLOCK : timeout;
			goto err_exit;
		}
	}

	eb->engine = ce->engine;
	eb->context = ce;
	return 0;

err_exit:
	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	intel_context_timeline_unlock(tl);
err_unpin:
	intel_context_unpin(ce);
	return err;
}

static void eb_unpin_engine(struct i915_execbuffer *eb)
{
	struct intel_context *ce = eb->context;
	struct intel_timeline *tl = ce->timeline;

	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	mutex_unlock(&tl->mutex);

	intel_context_unpin(ce);
}

static unsigned int
eb_select_legacy_ring(struct i915_execbuffer *eb,
		      struct drm_file *file,
		      struct drm_i915_gem_execbuffer2 *args)
{
	struct drm_i915_private *i915 = eb->i915;
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

	if (user_ring_id != I915_EXEC_BSD &&
	    (args->flags & I915_EXEC_BSD_MASK)) {
		drm_dbg(&i915->drm,
			"execbuf with non bsd ring but with invalid "
			"bsd dispatch flags: %d\n", (int)(args->flags));
		return -1;
	}

	if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			drm_dbg(&i915->drm,
				"execbuf with unknown bsd ring: %u\n",
				bsd_idx);
			return -1;
		}

		return _VCS(bsd_idx);
	}

	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
		drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
			user_ring_id);
		return -1;
	}

	return user_ring_map[user_ring_id];
}
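
/*
 * For illustration, a hedged userspace sketch of the legacy ring-selection
 * ABI translated above (using the uapi names from i915_drm.h; the fd and the
 * exec_objects array are assumed to exist already). Selecting an explicit
 * BSD engine avoids the per-fd round-robin in gen8_dispatch_bsd_engine():
 *
 *	struct drm_i915_gem_execbuffer2 execbuf = { };
 *
 *	execbuf.buffers_ptr = (uintptr_t)exec_objects;
 *	execbuf.buffer_count = nr_objects;
 *	execbuf.flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * Contexts with a user-defined engine map (I915_CONTEXT_PARAM_ENGINES)
 * bypass this translation entirely: for them, I915_EXEC_RING_MASK simply
 * indexes the map, see eb_pin_engine() below.
 */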

static int
eb_pin_engine(struct i915_execbuffer *eb,
	      struct drm_file *file,
	      struct drm_i915_gem_execbuffer2 *args)
{
	struct intel_context *ce;
	unsigned int idx;
	int err;

	if (i915_gem_context_user_engines(eb->gem_context))
		idx = args->flags & I915_EXEC_RING_MASK;
	else
		idx = eb_select_legacy_ring(eb, file, args);

	ce = i915_gem_context_get_engine(eb->gem_context, idx);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = __eb_pin_engine(eb, ce);
	intel_context_put(ce);

	return err;
}

static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}
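
/*
 * For illustration, a hedged userspace sketch of the I915_EXEC_FENCE_ARRAY
 * ABI consumed by get_fence_array(), await_fence_array() and
 * signal_fence_array() above. The syncobj handles are assumed to have been
 * created beforehand (e.g. via DRM_IOCTL_SYNCOBJ_CREATE); note that the
 * fence array reuses the legacy cliprects fields:
 *
 *	struct drm_i915_gem_exec_fence fences[2] = {
 *		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = { };
 *
 *	execbuf.buffers_ptr = (uintptr_t)exec_objects;
 *	execbuf.buffer_count = nr_objects;
 *	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_FENCE_ARRAY;
 *	execbuf.cliprects_ptr = (uintptr_t)fences;
 *	execbuf.num_cliprects = 2;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */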

static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (rq == end || !i915_request_retire(rq))
			break;
}

static void eb_request_add(struct i915_execbuffer *eb)
{
	struct i915_request *rq = eb->request;
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_sched_attr attr = {};
	struct i915_request *prev;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);

	prev = __i915_request_commit(rq);

	/* Check that the context wasn't destroyed before submission */
	if (likely(!intel_context_is_closed(eb->context))) {
		attr = eb->gem_context->sched;
	} else {
		/* Serialise with context_close via the add_to_timeline */
		i915_request_set_error_once(rq, -ENOENT);
		__i915_request_skip(rq);
	}

	__i915_request_queue(rq, &attr);

	/* Try to clean up the client's timeline after submitting the request */
	if (prev)
		retire_requests(tl, prev);

	mutex_unlock(&tl->mutex);
}
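
/*
 * For illustration, a hedged userspace sketch of the in/out sync_file ABI
 * handled by i915_gem_do_execbuffer() below: an input fence fd is passed in
 * the low 32 bits of rsvd2, and with I915_EXEC_FENCE_OUT the kernel returns
 * a new fence fd in the high 32 bits. Writing rsvd2 back requires the _WR
 * flavour of the ioctl (fd, in_fence_fd and execbuf are assumed to be set up
 * as in the earlier sketches):
 *
 *	execbuf.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
 *	execbuf.rsvd2 = in_fence_fd;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf) == 0)
 *		out_fence_fd = execbuf.rsvd2 >> 32;
 *
 * I915_EXEC_FENCE_SUBMIT uses the same rsvd2 slot but is mutually exclusive
 * with I915_EXEC_FENCE_IN (see the IN_FENCES check below).
 */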

static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	struct i915_vma *batch;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = i915;
	eb.file = file;
	eb.args = args;
	if (!(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;
	eb.trampoline = NULL;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (INTEL_GEN(i915) >= 11)
			return -ENODEV;

		/* Return -EPERM to trigger fallback code on old binaries. */
		if (!HAS_SECURE_BATCHES(i915))
			return -EPERM;

		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
	if (args->flags & IN_FENCES) {
		if ((args->flags & IN_FENCES) == IN_FENCES)
			return -EINVAL;

		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}
#undef IN_FENCES

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_in_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	err = eb_pin_engine(&eb, file, args);
	if (unlikely(err))
		goto err_context;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
		drm_dbg(&i915->drm,
			"Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (range_overflows_t(u64,
			      eb.batch_start_offset, eb.batch_len,
			      eb.batch->vma->size)) {
		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;

	err = eb_parse(&eb);
	if (err)
		goto err_vma;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	batch = eb.batch->vma;
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_parse;
		}

		batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_create(eb.context);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		if (args->flags & I915_EXEC_FENCE_SUBMIT)
			err = i915_request_await_execution(eb.request,
							   in_fence,
							   eb.engine->bond_execute);
		else
			err = i915_request_await_dma_fence(eb.request,
							   in_fence);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = batch;
	if (batch->private)
		intel_gt_buffer_pool_mark_active(batch->private, eb.request);

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb, batch);
err_request:
	add_to_client(eb.request, file);
	i915_request_get(eb.request);
	eb_request_add(&eb);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}
	i915_request_put(eb.request);

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(batch);
err_parse:
	if (batch->private)
		intel_gt_buffer_pool_put(batch->private);
err_vma:
	if (eb.trampoline)
		i915_vma_unpin(eb.trampoline);
	eb_unpin_engine(&eb);
err_context:
	i915_gem_context_put(eb.gem_context);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

static size_t eb_element_size(void)
{
	return sizeof(struct drm_i915_gem_exec_object2);
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}
2626 */ 2627 int 2628 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, 2629 struct drm_file *file) 2630 { 2631 struct drm_i915_private *i915 = to_i915(dev); 2632 struct drm_i915_gem_execbuffer *args = data; 2633 struct drm_i915_gem_execbuffer2 exec2; 2634 struct drm_i915_gem_exec_object *exec_list = NULL; 2635 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 2636 const size_t count = args->buffer_count; 2637 unsigned int i; 2638 int err; 2639 2640 if (!check_buffer_count(count)) { 2641 drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count); 2642 return -EINVAL; 2643 } 2644 2645 exec2.buffers_ptr = args->buffers_ptr; 2646 exec2.buffer_count = args->buffer_count; 2647 exec2.batch_start_offset = args->batch_start_offset; 2648 exec2.batch_len = args->batch_len; 2649 exec2.DR1 = args->DR1; 2650 exec2.DR4 = args->DR4; 2651 exec2.num_cliprects = args->num_cliprects; 2652 exec2.cliprects_ptr = args->cliprects_ptr; 2653 exec2.flags = I915_EXEC_RENDER; 2654 i915_execbuffer2_set_context_id(exec2, 0); 2655 2656 err = i915_gem_check_execbuffer(&exec2); 2657 if (err) 2658 return err; 2659 2660 /* Copy in the exec list from userland */ 2661 exec_list = kvmalloc_array(count, sizeof(*exec_list), 2662 __GFP_NOWARN | GFP_KERNEL); 2663 exec2_list = kvmalloc_array(count, eb_element_size(), 2664 __GFP_NOWARN | GFP_KERNEL); 2665 if (exec_list == NULL || exec2_list == NULL) { 2666 drm_dbg(&i915->drm, 2667 "Failed to allocate exec list for %d buffers\n", 2668 args->buffer_count); 2669 kvfree(exec_list); 2670 kvfree(exec2_list); 2671 return -ENOMEM; 2672 } 2673 err = copy_from_user(exec_list, 2674 u64_to_user_ptr(args->buffers_ptr), 2675 sizeof(*exec_list) * count); 2676 if (err) { 2677 drm_dbg(&i915->drm, "copy %d exec entries failed %d\n", 2678 args->buffer_count, err); 2679 kvfree(exec_list); 2680 kvfree(exec2_list); 2681 return -EFAULT; 2682 } 2683 2684 for (i = 0; i < args->buffer_count; i++) { 2685 exec2_list[i].handle = exec_list[i].handle; 2686 exec2_list[i].relocation_count = exec_list[i].relocation_count; 2687 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; 2688 exec2_list[i].alignment = exec_list[i].alignment; 2689 exec2_list[i].offset = exec_list[i].offset; 2690 if (INTEL_GEN(to_i915(dev)) < 4) 2691 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; 2692 else 2693 exec2_list[i].flags = 0; 2694 } 2695 2696 err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL); 2697 if (exec2.flags & __EXEC_HAS_RELOC) { 2698 struct drm_i915_gem_exec_object __user *user_exec_list = 2699 u64_to_user_ptr(args->buffers_ptr); 2700 2701 /* Copy the new buffer offsets back to the user's exec list. 

int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	err = i915_gem_check_execbuffer(args);
	if (err)
		return err;

	exec2_list = kvmalloc_array(count, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
			count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/*
		 * Copy the new buffer offsets back to the user's exec list.
		 *
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(user_exec_list,
				       count * sizeof(*user_exec_list)))
			goto end;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset &
						    PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
end:;
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_execbuffer.c"
#endif