// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2021 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"

#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_mitigations.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_regs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt_print.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (GRAPHICS_VER(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (GRAPHICS_VER(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0; /* address bits 35:32 in 7:4 */

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}
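/*
 * The hardware status page (HWSP) is a single page into which the engine
 * writes seqnos and other status. The oldest platforms can only address it
 * physically (set_hws_pga() above, selected via HWS_NEEDS_PHYSICAL), while
 * later ones take a GGTT offset (set_hwsp() below).
 */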
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (GRAPHICS_VER(engine->i915) == 7) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (GRAPHICS_VER(engine->i915) == 6) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write_fw(engine->uncore, hwsp, offset);
	intel_uncore_posting_read_fw(engine->uncore, hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
		return;

	/* The ring should be idle before issuing a sync flush */
	if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0)
		drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n",
			 engine->name);

	ENGINE_WRITE_FW(engine, RING_INSTPM,
			_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					   INSTPM_SYNC_FLUSH));
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_INSTPM(engine->mmio_base),
					 INSTPM_SYNC_FLUSH, 0,
					 2000, 0, NULL))
		ENGINE_TRACE(engine,
			     "wait for SyncFlush to complete for TLB invalidation timed out\n");
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static u32 pp_dir(struct i915_address_space *vm)
{
	return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (!vm)
		return;

	ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
	ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));

	if (GRAPHICS_VER(engine->i915) >= 7) {
		ENGINE_WRITE_FW(engine,
				RING_MODE_GEN7,
				_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	/* Empty the ring by skipping to the end */
	ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE_FW(engine, RING_CTL, 0);
	ENGINE_POSTING_READ(engine, RING_CTL);

	/* Then reset the disabled ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, 0);
	ENGINE_WRITE_FW(engine, RING_TAIL, 0);

	return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
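/*
 * Legacy ringbuffer operation in brief: RING_START holds the GGTT address
 * of the ring, RING_HEAD is the GPU's read offset, RING_TAIL is the CPU's
 * write offset, and RING_CTL carries the buffer size plus the RING_VALID
 * enable bit. The ring is empty when HEAD == TAIL; submission is simply
 * advancing TAIL past freshly written commands. xcs_resume() below
 * (re)programs these registers from the saved software state.
 */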
static int xcs_resume(struct intel_engine_cs *engine)
{
	struct intel_ring *ring = engine->legacy.ring;
	ktime_t kt;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	/*
	 * Double check the ring is empty & disabled before we resume. Called
	 * from atomic context during PCI probe, so _hardirq().
	 */
	intel_synchronize_hardirq(engine->i915);
	if (!stop_ring(engine))
		goto err;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_breadcrumbs_reset(engine->breadcrumbs);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/*
	 * First wake the ring up to an empty/idle state. Allow up to 50ms
	 * for the RING_HEAD write to take effect; experiments across
	 * platforms showed 50ms to be a reliable upper bound.
	 */
	for ((kt) = ktime_get() + (50 * NSEC_PER_MSEC);
	     ktime_before(ktime_get(), (kt)); cpu_relax()) {
		/*
		 * If a reset fails because the engine resumes from an
		 * incorrect RING_HEAD, the GPU may be fed invalid
		 * instructions, leading to an unrecoverable hang. So if
		 * the first write doesn't stick, try again.
		 */
		ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
		if (ENGINE_READ_FW(engine, RING_HEAD) == ring->head)
			break;
	}

	/* Start from an empty ring: TAIL == HEAD */
	ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
	if (ENGINE_READ_FW(engine, RING_HEAD) != ENGINE_READ_FW(engine, RING_TAIL)) {
		ENGINE_TRACE(engine, "failed to reset empty ring: [%x, %x]: %x\n",
			     ENGINE_READ_FW(engine, RING_HEAD),
			     ENGINE_READ_FW(engine, RING_TAIL),
			     ring->head);
		goto err;
	}

	ENGINE_WRITE_FW(engine, RING_CTL,
			RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the ring never reports itself as valid, it is dead */
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_CTL(engine->mmio_base),
					 RING_VALID, RING_VALID,
					 5000, 0, NULL)) {
		ENGINE_TRACE(engine, "failed to restart\n");
		goto err;
	}

	if (GRAPHICS_VER(engine->i915) > 2) {
		ENGINE_WRITE_FW(engine,
				RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
		ENGINE_POSTING_READ(engine, RING_MI_MODE);
	}

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
	return 0;

err:
	gt_err(engine->gt, "%s initialization failed\n", engine->name);
	ENGINE_TRACE(engine,
		     "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
		     ENGINE_READ(engine, RING_CTL),
		     ENGINE_READ(engine, RING_CTL) & RING_VALID,
		     ENGINE_READ(engine, RING_HEAD), ring->head,
		     ENGINE_READ(engine, RING_TAIL), ring->tail,
		     ENGINE_READ(engine, RING_START),
		     i915_ggtt_offset(ring->vma));
	GEM_TRACE_DUMP();
	return -EIO;
}
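/*
 * Many of the registers written above and below (e.g. RING_MI_MODE,
 * RING_INSTPM, RING_PSMI_CTL) are "masked" registers: the upper 16 bits of
 * a write select which of the lower 16 bits actually take effect, avoiding
 * a read-modify-write cycle. In effect:
 *
 *	_MASKED_BIT_ENABLE(bit)  == ((bit) << 16) | (bit)	set bit
 *	_MASKED_BIT_DISABLE(bit) == ((bit) << 16)		clear bit
 */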
static void sanitize_hwsp(struct intel_engine_cs *engine)
{
	struct intel_timeline *tl;

	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
		intel_timeline_reset_seqno(tl);
}

static void xcs_sanitize(struct intel_engine_cs *engine)
{
	/*
	 * Poison residual state on resume, in case the suspend didn't!
	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) that the contents of our pinned buffers has been
	 * lost, replaced by garbage. Since this doesn't always happen,
	 * let's poison such state so that we more quickly spot when
	 * we falsely assume it has been preserved.
	 */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);

	/*
	 * The kernel_context HWSP is stored in the status_page. As above,
	 * that may be lost on resume/initialisation, and so we need to
	 * reset the value in the HWSP.
	 */
	sanitize_hwsp(engine);

	/* And scrub the dirty cachelines for the HWSP */
	drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);

	intel_engine_reset_pinned_contexts(engine);
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also even GPUs as modern as kbl can suffer
	 * a system hang if a batchbuffer is progressing when the reset
	 * is issued, regardless of the READY_TO_RESET ack. Thus assume
	 * it is best to stop engines on all gens where we have a gpu
	 * reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 * WaClearRingBufHeadRegAtInit:ctg,elk
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");
	intel_engine_stop_cs(engine);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		ENGINE_TRACE(engine,
			     "HEAD not reset to zero, "
			     "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
			     ENGINE_READ_FW(engine, RING_CTL),
			     ENGINE_READ_FW(engine, RING_HEAD),
			     ENGINE_READ_FW(engine, RING_TAIL),
			     ENGINE_READ_FW(engine, RING_START));
		/*
		 * Sometimes the engine head fails to reset to zero even
		 * after it is written. Retry with wait_for_atomic() for up
		 * to 20ms so the engine resumes from the correct RING_HEAD;
		 * experiments showed 20ms to be a reliable upper bound.
		 */
		if (wait_for_atomic(stop_ring(engine), 20)) {
			drm_err(&engine->i915->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ_FW(engine, RING_CTL),
				ENGINE_READ_FW(engine, RING_HEAD),
				ENGINE_READ_FW(engine, RING_TAIL),
				ENGINE_READ_FW(engine, RING_START));
		}
	}
}
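/*
 * After reset_prepare() has stopped the engine, reset_rewind() walks the
 * request list for the oldest incomplete request and rewinds the ring to
 * replay (or skip) from there; reset_cancel() instead fails every
 * submitted request, e.g. when the device is wedged. All four hooks are
 * wired up as engine->reset.* in setup_common() below.
 */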
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	rcu_read_lock();
	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
		if (!__i915_request_is_complete(pos)) {
			rq = pos;
			break;
		}
	}
	rcu_read_unlock();

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when they lose the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanisms are
	 * safety valves if client submission ends up resulting in nothing
	 * more than subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(request));
	intel_engine_signal_breadcrumbs(engine);

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int ring_context_init_default_state(struct intel_context *ce,
					   struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *obj = ce->state->obj;
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	shmem_read(ce->default_state, 0, vaddr, ce->engine->context_size);

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	return 0;
}

static int ring_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww,
				void **unused)
{
	struct i915_address_space *vm;
	int err = 0;

	if (ce->default_state &&
	    !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
		err = ring_context_init_default_state(ce, ww);
		if (err)
			return err;
	}

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
}

static void ring_context_post_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	if (!intel_context_has_own_state(ce))
		ce->default_state = engine->default_state;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	return 0;
}

static int ring_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}

static void ring_context_revoke(struct intel_context *ce,
				struct i915_request *rq,
				unsigned int preempt_timeout_ms)
{
	struct intel_engine_cs *engine;

	if (!rq || !i915_request_is_active(rq))
		return;

	engine = rq->engine;
	lockdep_assert_held(&engine->sched_engine->lock);
	list_for_each_entry_continue(rq, &engine->sched_engine->requests,
				     sched.link)
		if (rq->context == ce) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
		}
}

static void ring_context_cancel_request(struct intel_context *ce,
					struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	i915_request_active_engine(rq, &engine);

	if (engine && intel_engine_pulse(engine))
		intel_gt_handle_error(engine->gt, engine->mask, 0,
				      "request cancellation by %s",
				      current->comm);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.cancel_request = ring_context_cancel_request,

	.revoke = ring_context_revoke,

	.pre_pin = ring_context_pre_pin,
	.pin = ring_context_pin,
	.unpin = ring_context_unpin,
	.post_unpin = ring_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

static int load_pd_dir(struct i915_request *rq,
		       struct i915_address_space *vm,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = pp_dir(vm);

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
static int mi_set_context(struct i915_request *rq,
			  struct intel_context *ce,
			  u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (GRAPHICS_VER(i915) == 7)
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (GRAPHICS_VER(i915) == 5)
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (GRAPHICS_VER(i915) == 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (GRAPHICS_VER(i915) == 7) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = INVALID_MMIO_REG; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (GRAPHICS_VER(i915) == 5) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}
static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
	for (i = 0; i < L3LOG_DW; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
#undef L3LOG_DW
}

static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    i915_vma_offset(engine->wa_ctx.vma), 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce &&
		    i915_mitigate_clear_residuals()) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}

static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}
static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/*
	 * Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 RING_PSMI_CTL(GEN6_BSD_RING_BASE),
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/*
	 * Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	drm_WARN_ON(&i915->drm, GRAPHICS_VER(i915) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	intel_engine_signal_breadcrumbs(engine);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_set_irq_handler(engine, irq_handler);

	if (GRAPHICS_VER(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}

static void add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}

static void remove_from_engine(struct i915_request *rq)
{
	spin_lock_irq(&rq->engine->sched_engine->lock);
	list_del_init(&rq->sched.link);

	/* Prevent further __await_execution() registering a cb, then flush */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	spin_unlock_irq(&rq->engine->sched_engine->lock);

	i915_request_notify_execute_cb_imm(rq);
}
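/*
 * Engine construction: setup_common() installs the vfuncs shared by all
 * legacy ring engines, setup_rcs()/setup_vcs()/setup_bcs()/setup_vecs()
 * layer the per-class differences on top, and
 * intel_ring_submission_setup() at the bottom of the file performs the
 * actual allocation and pinning.
 */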
static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->sanitize = xcs_sanitize;

	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->add_active_request = add_to_engine;
	engine->remove_active_request = remove_from_engine;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb, so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen2_emit_breadcrumb;
	if (GRAPHICS_VER(i915) == 5)
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (GRAPHICS_VER(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (GRAPHICS_VER(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen2_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 6) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 5) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (GRAPHICS_VER(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (GRAPHICS_VER(i915) == 6)
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (GRAPHICS_VER(i915) == 6)
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (GRAPHICS_VER(i915) == 5)
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}
static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) == 6)
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
				   struct i915_gem_ww_ctx *ww,
				   struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size, err;

	if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
		return NULL;

	err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (err < 0)
		return ERR_PTR(err);
	if (!err)
		return NULL;

	size = ALIGN(err, PAGE_SIZE);

	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return ERR_CAST(vma);
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		vma->private = NULL;
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}
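/*
 * Final assembly: pick the per-class setup, create the global timeline
 * and the 16k ring, optionally build the gen7 residuals workaround batch,
 * and pin everything under a single ww transaction so that -EDEADLK
 * backoff can restart the whole locking sequence.
 */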
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct i915_gem_ww_ctx ww;
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	struct i915_vma *gen7_wa_vma;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create_from_engine(engine,
						     I915_GEM_HWS_SEQNO_ADDR);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline;
	}

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	gen7_wa_vma = gen7_ctx_vma(engine);
	if (IS_ERR(gen7_wa_vma)) {
		err = PTR_ERR(gen7_wa_vma);
		goto err_ring;
	}

	i915_gem_ww_ctx_init(&ww, false);

retry:
	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
	if (!err && gen7_wa_vma)
		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
	if (!err)
		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
	if (!err)
		err = intel_timeline_pin(timeline, &ww);
	if (!err) {
		err = intel_ring_pin(ring, &ww);
		if (err)
			intel_timeline_unpin(timeline);
	}
	if (err)
		goto out;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (gen7_wa_vma) {
		err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
		if (err) {
			intel_ring_unpin(ring);
			intel_timeline_unpin(timeline);
		}
	}

out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (err)
		goto err_gen7_put;

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_gen7_put:
	if (gen7_wa_vma) {
		intel_context_put(gen7_wa_vma->private);
		i915_gem_object_put(gen7_wa_vma->obj);
	}
err_ring:
	intel_ring_put(ring);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif