1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2018 Intel Corporation 4 */ 5 6 #include <linux/prime_numbers.h> 7 8 #include <drm/drm_print.h> 9 10 #include "gem/i915_gem_internal.h" 11 #include "gem/i915_gem_pm.h" 12 #include "gt/intel_engine_heartbeat.h" 13 #include "gt/intel_reset.h" 14 #include "gt/selftest_engine_heartbeat.h" 15 16 #include "i915_selftest.h" 17 #include "selftests/i915_random.h" 18 #include "selftests/igt_flush_test.h" 19 #include "selftests/igt_live_test.h" 20 #include "selftests/igt_spinner.h" 21 #include "selftests/lib_sw_fence.h" 22 23 #include "gem/selftests/igt_gem_utils.h" 24 #include "gem/selftests/mock_context.h" 25 26 #define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4) 27 #define NUM_GPR 16 28 #define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */ 29 30 static bool is_active(struct i915_request *rq) 31 { 32 if (i915_request_is_active(rq)) 33 return true; 34 35 if (i915_request_on_hold(rq)) 36 return true; 37 38 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) 39 return true; 40 41 return false; 42 } 43 44 static int wait_for_submit(struct intel_engine_cs *engine, 45 struct i915_request *rq, 46 unsigned long timeout) 47 { 48 /* Ignore our own attempts to suppress excess tasklets */ 49 tasklet_hi_schedule(&engine->sched_engine->tasklet); 50 51 timeout += jiffies; 52 do { 53 bool done = time_after(jiffies, timeout); 54 55 if (i915_request_completed(rq)) /* that was quick! */ 56 return 0; 57 58 /* Wait until the HW has acknowledged the submission (or err) */ 59 intel_engine_flush_submission(engine); 60 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) 61 return 0; 62 63 if (done) 64 return -ETIME; 65 66 cond_resched(); 67 } while (1); 68 } 69 70 static int wait_for_reset(struct intel_engine_cs *engine, 71 struct i915_request *rq, 72 unsigned long timeout) 73 { 74 timeout += jiffies; 75 76 do { 77 cond_resched(); 78 intel_engine_flush_submission(engine); 79 80 if (READ_ONCE(engine->execlists.pending[0])) 81 continue; 82 83 if (i915_request_completed(rq)) 84 break; 85 86 if (READ_ONCE(rq->fence.error)) 87 break; 88 } while (time_before(jiffies, timeout)); 89 90 if (rq->fence.error != -EIO) { 91 pr_err("%s: hanging request %llx:%lld not reset\n", 92 engine->name, 93 rq->fence.context, 94 rq->fence.seqno); 95 return -EINVAL; 96 } 97 98 /* Give the request a jiffy to complete after flushing the worker */ 99 if (i915_request_wait(rq, 0, 100 max(0l, (long)(timeout - jiffies)) + 1) < 0) { 101 pr_err("%s: hanging request %llx:%lld did not complete\n", 102 engine->name, 103 rq->fence.context, 104 rq->fence.seqno); 105 return -ETIME; 106 } 107 108 return 0; 109 } 110 111 static int live_sanitycheck(void *arg) 112 { 113 struct intel_gt *gt = arg; 114 struct intel_engine_cs *engine; 115 enum intel_engine_id id; 116 struct igt_spinner spin; 117 int err = 0; 118 119 if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915)) 120 return 0; 121 122 if (igt_spinner_init(&spin, gt)) 123 return -ENOMEM; 124 125 for_each_engine(engine, gt, id) { 126 struct intel_context *ce; 127 struct i915_request *rq; 128 129 ce = intel_context_create(engine); 130 if (IS_ERR(ce)) { 131 err = PTR_ERR(ce); 132 break; 133 } 134 135 rq = igt_spinner_create_request(&spin, ce, MI_NOOP); 136 if (IS_ERR(rq)) { 137 err = PTR_ERR(rq); 138 goto out_ctx; 139 } 140 141 i915_request_add(rq); 142 if (!igt_wait_for_spinner(&spin, rq)) { 143 GEM_TRACE("spinner failed to start\n"); 144 GEM_TRACE_DUMP(); 145 intel_gt_set_wedged(gt); 146 err = -EIO; 147 goto out_ctx; 148 
} 149 150 igt_spinner_end(&spin); 151 if (igt_flush_test(gt->i915)) { 152 err = -EIO; 153 goto out_ctx; 154 } 155 156 out_ctx: 157 intel_context_put(ce); 158 if (err) 159 break; 160 } 161 162 igt_spinner_fini(&spin); 163 return err; 164 } 165 166 static int live_unlite_restore(struct intel_gt *gt, int prio) 167 { 168 struct intel_engine_cs *engine; 169 enum intel_engine_id id; 170 struct igt_spinner spin; 171 int err = -ENOMEM; 172 173 /* 174 * Check that we can correctly context switch between 2 instances 175 * on the same engine from the same parent context. 176 */ 177 178 if (igt_spinner_init(&spin, gt)) 179 return err; 180 181 err = 0; 182 for_each_engine(engine, gt, id) { 183 struct intel_context *ce[2] = {}; 184 struct i915_request *rq[2]; 185 struct igt_live_test t; 186 int n; 187 188 if (prio && !intel_engine_has_preemption(engine)) 189 continue; 190 191 if (!intel_engine_can_store_dword(engine)) 192 continue; 193 194 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { 195 err = -EIO; 196 break; 197 } 198 st_engine_heartbeat_disable(engine); 199 200 for (n = 0; n < ARRAY_SIZE(ce); n++) { 201 struct intel_context *tmp; 202 203 tmp = intel_context_create(engine); 204 if (IS_ERR(tmp)) { 205 err = PTR_ERR(tmp); 206 goto err_ce; 207 } 208 209 err = intel_context_pin(tmp); 210 if (err) { 211 intel_context_put(tmp); 212 goto err_ce; 213 } 214 215 /* 216 * Setup the pair of contexts such that if we 217 * lite-restore using the RING_TAIL from ce[1] it 218 * will execute garbage from ce[0]->ring. 219 */ 220 memset(tmp->ring->vaddr, 221 POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */ 222 tmp->ring->vma->size); 223 224 ce[n] = tmp; 225 } 226 GEM_BUG_ON(!ce[1]->ring->size); 227 intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2); 228 lrc_update_regs(ce[1], engine, ce[1]->ring->head); 229 230 rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); 231 if (IS_ERR(rq[0])) { 232 err = PTR_ERR(rq[0]); 233 goto err_ce; 234 } 235 236 i915_request_get(rq[0]); 237 i915_request_add(rq[0]); 238 GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit); 239 240 if (!igt_wait_for_spinner(&spin, rq[0])) { 241 i915_request_put(rq[0]); 242 goto err_ce; 243 } 244 245 rq[1] = i915_request_create(ce[1]); 246 if (IS_ERR(rq[1])) { 247 err = PTR_ERR(rq[1]); 248 i915_request_put(rq[0]); 249 goto err_ce; 250 } 251 252 if (!prio) { 253 /* 254 * Ensure we do the switch to ce[1] on completion. 255 * 256 * rq[0] is already submitted, so this should reduce 257 * to a no-op (a wait on a request on the same engine 258 * uses the submit fence, not the completion fence), 259 * but it will install a dependency on rq[1] for rq[0] 260 * that will prevent the pair being reordered by 261 * timeslicing. 
262 */ 263 i915_request_await_dma_fence(rq[1], &rq[0]->fence); 264 } 265 266 i915_request_get(rq[1]); 267 i915_request_add(rq[1]); 268 GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); 269 i915_request_put(rq[0]); 270 271 if (prio) { 272 struct i915_sched_attr attr = { 273 .priority = prio, 274 }; 275 276 /* Alternatively preempt the spinner with ce[1] */ 277 engine->sched_engine->schedule(rq[1], &attr); 278 } 279 280 /* And switch back to ce[0] for good measure */ 281 rq[0] = i915_request_create(ce[0]); 282 if (IS_ERR(rq[0])) { 283 err = PTR_ERR(rq[0]); 284 i915_request_put(rq[1]); 285 goto err_ce; 286 } 287 288 i915_request_await_dma_fence(rq[0], &rq[1]->fence); 289 i915_request_get(rq[0]); 290 i915_request_add(rq[0]); 291 GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); 292 i915_request_put(rq[1]); 293 i915_request_put(rq[0]); 294 295 err_ce: 296 intel_engine_flush_submission(engine); 297 igt_spinner_end(&spin); 298 for (n = 0; n < ARRAY_SIZE(ce); n++) { 299 if (IS_ERR_OR_NULL(ce[n])) 300 break; 301 302 intel_context_unpin(ce[n]); 303 intel_context_put(ce[n]); 304 } 305 306 st_engine_heartbeat_enable(engine); 307 if (igt_live_test_end(&t)) 308 err = -EIO; 309 if (err) 310 break; 311 } 312 313 igt_spinner_fini(&spin); 314 return err; 315 } 316 317 static int live_unlite_switch(void *arg) 318 { 319 return live_unlite_restore(arg, 0); 320 } 321 322 static int live_unlite_preempt(void *arg) 323 { 324 return live_unlite_restore(arg, I915_PRIORITY_MAX); 325 } 326 327 static int live_unlite_ring(void *arg) 328 { 329 struct intel_gt *gt = arg; 330 struct intel_engine_cs *engine; 331 struct igt_spinner spin; 332 enum intel_engine_id id; 333 int err = 0; 334 335 /* 336 * Setup a preemption event that will cause almost the entire ring 337 * to be unwound, potentially fooling our intel_ring_direction() 338 * into emitting a forward lite-restore instead of the rollback. 
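	 * (On resubmission of the same context the CS may simply continue from
	 * the new RING_TAIL without a full context restore, a "lite restore";
	 * if the unwound tail is misjudged as lying ahead of the old one, the
	 * CS would run forwards through the poisoned ring contents instead of
	 * rolling back.)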
339 */ 340 341 if (igt_spinner_init(&spin, gt)) 342 return -ENOMEM; 343 344 for_each_engine(engine, gt, id) { 345 struct intel_context *ce[2] = {}; 346 struct i915_request *rq; 347 struct igt_live_test t; 348 int n; 349 350 if (!intel_engine_has_preemption(engine)) 351 continue; 352 353 if (!intel_engine_can_store_dword(engine)) 354 continue; 355 356 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { 357 err = -EIO; 358 break; 359 } 360 st_engine_heartbeat_disable(engine); 361 362 for (n = 0; n < ARRAY_SIZE(ce); n++) { 363 struct intel_context *tmp; 364 365 tmp = intel_context_create(engine); 366 if (IS_ERR(tmp)) { 367 err = PTR_ERR(tmp); 368 goto err_ce; 369 } 370 371 err = intel_context_pin(tmp); 372 if (err) { 373 intel_context_put(tmp); 374 goto err_ce; 375 } 376 377 memset32(tmp->ring->vaddr, 378 0xdeadbeef, /* trigger a hang if executed */ 379 tmp->ring->vma->size / sizeof(u32)); 380 381 ce[n] = tmp; 382 } 383 384 /* Create max prio spinner, followed by N low prio nops */ 385 rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); 386 if (IS_ERR(rq)) { 387 err = PTR_ERR(rq); 388 goto err_ce; 389 } 390 391 i915_request_get(rq); 392 rq->sched.attr.priority = I915_PRIORITY_BARRIER; 393 i915_request_add(rq); 394 395 if (!igt_wait_for_spinner(&spin, rq)) { 396 intel_gt_set_wedged(gt); 397 i915_request_put(rq); 398 err = -ETIME; 399 goto err_ce; 400 } 401 402 /* Fill the ring, until we will cause a wrap */ 403 n = 0; 404 while (intel_ring_direction(ce[0]->ring, 405 rq->wa_tail, 406 ce[0]->ring->tail) <= 0) { 407 struct i915_request *tmp; 408 409 tmp = intel_context_create_request(ce[0]); 410 if (IS_ERR(tmp)) { 411 err = PTR_ERR(tmp); 412 i915_request_put(rq); 413 goto err_ce; 414 } 415 416 i915_request_add(tmp); 417 intel_engine_flush_submission(engine); 418 n++; 419 } 420 intel_engine_flush_submission(engine); 421 pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n", 422 engine->name, n, 423 ce[0]->ring->size, 424 ce[0]->ring->tail, 425 ce[0]->ring->emit, 426 rq->tail); 427 GEM_BUG_ON(intel_ring_direction(ce[0]->ring, 428 rq->tail, 429 ce[0]->ring->tail) <= 0); 430 i915_request_put(rq); 431 432 /* Create a second ring to preempt the first ring after rq[0] */ 433 rq = intel_context_create_request(ce[1]); 434 if (IS_ERR(rq)) { 435 err = PTR_ERR(rq); 436 goto err_ce; 437 } 438 439 rq->sched.attr.priority = I915_PRIORITY_BARRIER; 440 i915_request_get(rq); 441 i915_request_add(rq); 442 443 err = wait_for_submit(engine, rq, HZ / 2); 444 i915_request_put(rq); 445 if (err) { 446 pr_err("%s: preemption request was not submitted\n", 447 engine->name); 448 err = -ETIME; 449 } 450 451 pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n", 452 engine->name, 453 ce[0]->ring->tail, ce[0]->ring->emit, 454 ce[1]->ring->tail, ce[1]->ring->emit); 455 456 err_ce: 457 intel_engine_flush_submission(engine); 458 igt_spinner_end(&spin); 459 for (n = 0; n < ARRAY_SIZE(ce); n++) { 460 if (IS_ERR_OR_NULL(ce[n])) 461 break; 462 463 intel_context_unpin(ce[n]); 464 intel_context_put(ce[n]); 465 } 466 st_engine_heartbeat_enable(engine); 467 if (igt_live_test_end(&t)) 468 err = -EIO; 469 if (err) 470 break; 471 } 472 473 igt_spinner_fini(&spin); 474 return err; 475 } 476 477 static int live_pin_rewind(void *arg) 478 { 479 struct intel_gt *gt = arg; 480 struct intel_engine_cs *engine; 481 enum intel_engine_id id; 482 int err = 0; 483 484 /* 485 * We have to be careful not to trust intel_ring too much, for example 486 * ring->head is updated upon 
retire which is out of sync with pinning
	 * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
	 * or else we risk writing an older, stale value.
	 *
	 * To simulate this, let's apply a bit of deliberate sabotage.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct i915_request *rq;
		struct intel_ring *ring;
		struct igt_live_test t;

		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
			err = -EIO;
			break;
		}

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = intel_context_pin(ce);
		if (err) {
			intel_context_put(ce);
			break;
		}

		/* Keep the context awake while we play games */
		err = i915_active_acquire(&ce->active);
		if (err) {
			intel_context_unpin(ce);
			intel_context_put(ce);
			break;
		}
		ring = ce->ring;

		/* Poison the ring, and offset the next request from HEAD */
		memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
		ring->emit = ring->size / 2;
		ring->tail = ring->emit;
		GEM_BUG_ON(ring->head);

		intel_context_unpin(ce);

		/* Submit a simple nop request */
		GEM_BUG_ON(intel_context_is_pinned(ce));
		rq = intel_context_create_request(ce);
		i915_active_release(&ce->active); /* e.g. async retire */
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}
		GEM_BUG_ON(!rq->head);
		i915_request_add(rq);

		/* Expect not to hang! */
		if (igt_live_test_end(&t)) {
			err = -EIO;
			break;
		}
	}

	return err;
}

static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
{
	tasklet_disable(&engine->sched_engine->tasklet);
	local_bh_disable();

	if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
			     &engine->gt->reset.flags)) {
		local_bh_enable();
		tasklet_enable(&engine->sched_engine->tasklet);

		intel_gt_set_wedged(engine->gt);
		return -EBUSY;
	}

	return 0;
}

static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
{
	clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
			      &engine->gt->reset.flags);

	local_bh_enable();
	tasklet_enable(&engine->sched_engine->tasklet);
}

static int live_hold_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_spinner spin;
	int err = 0;

	/*
	 * In order to support offline error capture for fast preempt reset,
	 * we need to decouple the guilty request and ensure that it and its
	 * descendants are not executed while the capture is in progress.
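	 * execlists_hold() must keep the request (and its waiters) off the
	 * HW even across the engine reset, and only execlists_unhold() may
	 * requeue it so that it can finally complete.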
593 */ 594 595 if (!intel_has_reset_engine(gt)) 596 return 0; 597 598 if (igt_spinner_init(&spin, gt)) 599 return -ENOMEM; 600 601 for_each_engine(engine, gt, id) { 602 struct intel_context *ce; 603 struct i915_request *rq; 604 605 ce = intel_context_create(engine); 606 if (IS_ERR(ce)) { 607 err = PTR_ERR(ce); 608 break; 609 } 610 611 st_engine_heartbeat_disable(engine); 612 613 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); 614 if (IS_ERR(rq)) { 615 err = PTR_ERR(rq); 616 goto out; 617 } 618 i915_request_add(rq); 619 620 if (!igt_wait_for_spinner(&spin, rq)) { 621 intel_gt_set_wedged(gt); 622 err = -ETIME; 623 goto out; 624 } 625 626 /* We have our request executing, now remove it and reset */ 627 628 err = engine_lock_reset_tasklet(engine); 629 if (err) 630 goto out; 631 632 engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet); 633 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); 634 635 i915_request_get(rq); 636 execlists_hold(engine, rq); 637 GEM_BUG_ON(!i915_request_on_hold(rq)); 638 639 __intel_engine_reset_bh(engine, NULL); 640 GEM_BUG_ON(rq->fence.error != -EIO); 641 642 engine_unlock_reset_tasklet(engine); 643 644 /* Check that we do not resubmit the held request */ 645 if (!i915_request_wait(rq, 0, HZ / 5)) { 646 pr_err("%s: on hold request completed!\n", 647 engine->name); 648 i915_request_put(rq); 649 err = -EIO; 650 goto out; 651 } 652 GEM_BUG_ON(!i915_request_on_hold(rq)); 653 654 /* But is resubmitted on release */ 655 execlists_unhold(engine, rq); 656 if (i915_request_wait(rq, 0, HZ / 5) < 0) { 657 pr_err("%s: held request did not complete!\n", 658 engine->name); 659 intel_gt_set_wedged(gt); 660 err = -ETIME; 661 } 662 i915_request_put(rq); 663 664 out: 665 st_engine_heartbeat_enable(engine); 666 intel_context_put(ce); 667 if (err) 668 break; 669 } 670 671 igt_spinner_fini(&spin); 672 return err; 673 } 674 675 static const char *error_repr(int err) 676 { 677 return err ? "bad" : "good"; 678 } 679 680 static int live_error_interrupt(void *arg) 681 { 682 static const struct error_phase { 683 enum { GOOD = 0, BAD = -EIO } error[2]; 684 } phases[] = { 685 { { BAD, GOOD } }, 686 { { BAD, BAD } }, 687 { { BAD, GOOD } }, 688 { { GOOD, GOOD } }, /* sentinel */ 689 }; 690 struct intel_gt *gt = arg; 691 struct intel_engine_cs *engine; 692 enum intel_engine_id id; 693 694 /* 695 * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning 696 * of invalid commands in user batches that will cause a GPU hang. 697 * This is a faster mechanism than using hangcheck/heartbeats, but 698 * only detects problems the HW knows about -- it will not warn when 699 * we kill the HW! 700 * 701 * To verify our detection and reset, we throw some invalid commands 702 * at the HW and wait for the interrupt. 
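	 * Each phase submits one request per slot: a "bad" request executes
	 * 0xdeadbeef and must end up with fence.error set to -EIO once the
	 * CS error is handled, while a "good" request emits MI_NOOPs and
	 * must complete with no error set.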
703 */ 704 705 if (!intel_has_reset_engine(gt)) 706 return 0; 707 708 for_each_engine(engine, gt, id) { 709 const struct error_phase *p; 710 int err = 0; 711 712 st_engine_heartbeat_disable(engine); 713 714 for (p = phases; p->error[0] != GOOD; p++) { 715 struct i915_request *client[ARRAY_SIZE(phases->error)]; 716 u32 *cs; 717 int i; 718 719 memset(client, 0, sizeof(*client)); 720 for (i = 0; i < ARRAY_SIZE(client); i++) { 721 struct intel_context *ce; 722 struct i915_request *rq; 723 724 ce = intel_context_create(engine); 725 if (IS_ERR(ce)) { 726 err = PTR_ERR(ce); 727 goto out; 728 } 729 730 rq = intel_context_create_request(ce); 731 intel_context_put(ce); 732 if (IS_ERR(rq)) { 733 err = PTR_ERR(rq); 734 goto out; 735 } 736 737 if (rq->engine->emit_init_breadcrumb) { 738 err = rq->engine->emit_init_breadcrumb(rq); 739 if (err) { 740 i915_request_add(rq); 741 goto out; 742 } 743 } 744 745 cs = intel_ring_begin(rq, 2); 746 if (IS_ERR(cs)) { 747 i915_request_add(rq); 748 err = PTR_ERR(cs); 749 goto out; 750 } 751 752 if (p->error[i]) { 753 *cs++ = 0xdeadbeef; 754 *cs++ = 0xdeadbeef; 755 } else { 756 *cs++ = MI_NOOP; 757 *cs++ = MI_NOOP; 758 } 759 760 client[i] = i915_request_get(rq); 761 i915_request_add(rq); 762 } 763 764 err = wait_for_submit(engine, client[0], HZ / 2); 765 if (err) { 766 pr_err("%s: first request did not start within time!\n", 767 engine->name); 768 err = -ETIME; 769 goto out; 770 } 771 772 for (i = 0; i < ARRAY_SIZE(client); i++) { 773 if (i915_request_wait(client[i], 0, HZ / 5) < 0) 774 pr_debug("%s: %s request incomplete!\n", 775 engine->name, 776 error_repr(p->error[i])); 777 778 if (!i915_request_started(client[i])) { 779 pr_err("%s: %s request not started!\n", 780 engine->name, 781 error_repr(p->error[i])); 782 err = -ETIME; 783 goto out; 784 } 785 786 /* Kick the tasklet to process the error */ 787 intel_engine_flush_submission(engine); 788 if (client[i]->fence.error != p->error[i]) { 789 pr_err("%s: %s request (%s) with wrong error code: %d\n", 790 engine->name, 791 error_repr(p->error[i]), 792 i915_request_completed(client[i]) ? 
"completed" : "running", 793 client[i]->fence.error); 794 err = -EINVAL; 795 goto out; 796 } 797 } 798 799 out: 800 for (i = 0; i < ARRAY_SIZE(client); i++) 801 if (client[i]) 802 i915_request_put(client[i]); 803 if (err) { 804 pr_err("%s: failed at phase[%zd] { %d, %d }\n", 805 engine->name, p - phases, 806 p->error[0], p->error[1]); 807 break; 808 } 809 } 810 811 st_engine_heartbeat_enable(engine); 812 if (err) { 813 intel_gt_set_wedged(gt); 814 return err; 815 } 816 } 817 818 return 0; 819 } 820 821 static int 822 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) 823 { 824 u32 *cs; 825 826 cs = intel_ring_begin(rq, 10); 827 if (IS_ERR(cs)) 828 return PTR_ERR(cs); 829 830 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 831 832 *cs++ = MI_SEMAPHORE_WAIT | 833 MI_SEMAPHORE_GLOBAL_GTT | 834 MI_SEMAPHORE_POLL | 835 MI_SEMAPHORE_SAD_NEQ_SDD; 836 *cs++ = 0; 837 *cs++ = i915_ggtt_offset(vma) + 4 * idx; 838 *cs++ = 0; 839 840 if (idx > 0) { 841 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 842 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); 843 *cs++ = 0; 844 *cs++ = 1; 845 } else { 846 *cs++ = MI_NOOP; 847 *cs++ = MI_NOOP; 848 *cs++ = MI_NOOP; 849 *cs++ = MI_NOOP; 850 } 851 852 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 853 854 intel_ring_advance(rq, cs); 855 return 0; 856 } 857 858 static struct i915_request * 859 semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) 860 { 861 struct intel_context *ce; 862 struct i915_request *rq; 863 int err; 864 865 ce = intel_context_create(engine); 866 if (IS_ERR(ce)) 867 return ERR_CAST(ce); 868 869 rq = intel_context_create_request(ce); 870 if (IS_ERR(rq)) 871 goto out_ce; 872 873 err = 0; 874 if (rq->engine->emit_init_breadcrumb) 875 err = rq->engine->emit_init_breadcrumb(rq); 876 if (err == 0) 877 err = emit_semaphore_chain(rq, vma, idx); 878 if (err == 0) 879 i915_request_get(rq); 880 i915_request_add(rq); 881 if (err) 882 rq = ERR_PTR(err); 883 884 out_ce: 885 intel_context_put(ce); 886 return rq; 887 } 888 889 static int 890 release_queue(struct intel_engine_cs *engine, 891 struct i915_vma *vma, 892 int idx, int prio) 893 { 894 struct i915_sched_attr attr = { 895 .priority = prio, 896 }; 897 struct i915_request *rq; 898 u32 *cs; 899 900 rq = intel_engine_create_kernel_request(engine); 901 if (IS_ERR(rq)) 902 return PTR_ERR(rq); 903 904 cs = intel_ring_begin(rq, 4); 905 if (IS_ERR(cs)) { 906 i915_request_add(rq); 907 return PTR_ERR(cs); 908 } 909 910 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 911 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); 912 *cs++ = 0; 913 *cs++ = 1; 914 915 intel_ring_advance(rq, cs); 916 917 i915_request_get(rq); 918 i915_request_add(rq); 919 920 local_bh_disable(); 921 engine->sched_engine->schedule(rq, &attr); 922 local_bh_enable(); /* kick tasklet */ 923 924 i915_request_put(rq); 925 926 return 0; 927 } 928 929 static int 930 slice_semaphore_queue(struct intel_engine_cs *outer, 931 struct i915_vma *vma, 932 int count) 933 { 934 struct intel_engine_cs *engine; 935 struct i915_request *head; 936 enum intel_engine_id id; 937 int err, i, n = 0; 938 939 head = semaphore_queue(outer, vma, n++); 940 if (IS_ERR(head)) 941 return PTR_ERR(head); 942 943 for_each_engine(engine, outer->gt, id) { 944 if (!intel_engine_has_preemption(engine)) 945 continue; 946 947 for (i = 0; i < count; i++) { 948 struct i915_request *rq; 949 950 rq = semaphore_queue(engine, vma, n++); 951 if (IS_ERR(rq)) { 952 err = PTR_ERR(rq); 953 goto out; 954 } 955 956 i915_request_put(rq); 957 } 958 } 959 960 err = 
release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
	if (err)
		goto out;

	if (i915_request_wait(head, 0,
			      2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
		pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
		       outer->name, count, n);
		GEM_TRACE_DUMP();
		intel_gt_set_wedged(outer->gt);
		err = -EIO;
	}

out:
	i915_request_put(head);
	return err;
}

static int live_timeslice_preempt(void *arg)
{
	struct intel_gt *gt = arg;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct i915_vma *vma;
	void *vaddr;
	int err = 0;

	/*
	 * If a request takes too long, we would like to give other users
	 * a fair go on the GPU. In particular, users may create batches
	 * that wait upon external input, where that input may even be
	 * supplied by another GPU job. To avoid blocking forever, we
	 * need to preempt the current task and replace it with another
	 * ready task.
	 */
	if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
		return 0;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_map;

	err = i915_vma_sync(vma);
	if (err)
		goto err_pin;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_has_preemption(engine))
			continue;

		memset(vaddr, 0, PAGE_SIZE);

		st_engine_heartbeat_disable(engine);
		err = slice_semaphore_queue(engine, vma, 5);
		st_engine_heartbeat_enable(engine);
		if (err)
			goto err_pin;

		if (igt_flush_test(gt->i915)) {
			err = -EIO;
			goto err_pin;
		}
	}

err_pin:
	i915_vma_unpin(vma);
err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

static struct i915_request *
create_rewinder(struct intel_context *ce,
		struct i915_request *wait,
		void *slot, int idx)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;
	int err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	if (wait) {
		err = i915_request_await_dma_fence(rq, &wait->fence);
		if (err)
			goto err;
	}

	cs = intel_ring_begin(rq, 14);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err;
	}

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_GTE_SDD;
	*cs++ = idx;
	*cs++ = offset;
	*cs++ = 0;

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = offset + idx * sizeof(u32);
	*cs++ = 0;

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = offset;
	*cs++ = 0;
	*cs++ = idx + 1;

	intel_ring_advance(rq, cs);

	err = 0;
err:
	i915_request_get(rq);
	i915_request_add(rq);
1105 if (err) { 1106 i915_request_put(rq); 1107 return ERR_PTR(err); 1108 } 1109 1110 return rq; 1111 } 1112 1113 static int live_timeslice_rewind(void *arg) 1114 { 1115 struct intel_gt *gt = arg; 1116 struct intel_engine_cs *engine; 1117 enum intel_engine_id id; 1118 1119 /* 1120 * The usual presumption on timeslice expiration is that we replace 1121 * the active context with another. However, given a chain of 1122 * dependencies we may end up with replacing the context with itself, 1123 * but only a few of those requests, forcing us to rewind the 1124 * RING_TAIL of the original request. 1125 */ 1126 if (!CONFIG_DRM_I915_TIMESLICE_DURATION) 1127 return 0; 1128 1129 for_each_engine(engine, gt, id) { 1130 enum { A1, A2, B1 }; 1131 enum { X = 1, Z, Y }; 1132 struct i915_request *rq[3] = {}; 1133 struct intel_context *ce; 1134 unsigned long timeslice; 1135 int i, err = 0; 1136 u32 *slot; 1137 1138 if (!intel_engine_has_timeslices(engine)) 1139 continue; 1140 1141 /* 1142 * A:rq1 -- semaphore wait, timestamp X 1143 * A:rq2 -- write timestamp Y 1144 * 1145 * B:rq1 [await A:rq1] -- write timestamp Z 1146 * 1147 * Force timeslice, release semaphore. 1148 * 1149 * Expect execution/evaluation order XZY 1150 */ 1151 1152 st_engine_heartbeat_disable(engine); 1153 timeslice = xchg(&engine->props.timeslice_duration_ms, 1); 1154 1155 slot = memset32(engine->status_page.addr + 1000, 0, 4); 1156 1157 ce = intel_context_create(engine); 1158 if (IS_ERR(ce)) { 1159 err = PTR_ERR(ce); 1160 goto err; 1161 } 1162 1163 rq[A1] = create_rewinder(ce, NULL, slot, X); 1164 if (IS_ERR(rq[A1])) { 1165 intel_context_put(ce); 1166 goto err; 1167 } 1168 1169 rq[A2] = create_rewinder(ce, NULL, slot, Y); 1170 intel_context_put(ce); 1171 if (IS_ERR(rq[A2])) 1172 goto err; 1173 1174 err = wait_for_submit(engine, rq[A2], HZ / 2); 1175 if (err) { 1176 pr_err("%s: failed to submit first context\n", 1177 engine->name); 1178 goto err; 1179 } 1180 1181 ce = intel_context_create(engine); 1182 if (IS_ERR(ce)) { 1183 err = PTR_ERR(ce); 1184 goto err; 1185 } 1186 1187 rq[B1] = create_rewinder(ce, rq[A1], slot, Z); 1188 intel_context_put(ce); 1189 if (IS_ERR(rq[2])) 1190 goto err; 1191 1192 err = wait_for_submit(engine, rq[B1], HZ / 2); 1193 if (err) { 1194 pr_err("%s: failed to submit second context\n", 1195 engine->name); 1196 goto err; 1197 } 1198 1199 /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */ 1200 ENGINE_TRACE(engine, "forcing tasklet for rewind\n"); 1201 while (i915_request_is_active(rq[A2])) { /* semaphore yield! */ 1202 /* Wait for the timeslice to kick in */ 1203 timer_delete(&engine->execlists.timer); 1204 tasklet_hi_schedule(&engine->sched_engine->tasklet); 1205 intel_engine_flush_submission(engine); 1206 } 1207 /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */ 1208 GEM_BUG_ON(!i915_request_is_active(rq[A1])); 1209 GEM_BUG_ON(!i915_request_is_active(rq[B1])); 1210 GEM_BUG_ON(i915_request_is_active(rq[A2])); 1211 1212 /* Release the hounds! 
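		 * Writing 1 to slot[0] satisfies the MI_SEMAPHORE_WAIT in
		 * A:rq1; each rewinder then stores RING_TIMESTAMP into its own
		 * slot and bumps slot[0], releasing the next request in the
		 * expected X -> Z -> Y chain.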
*/ 1213 slot[0] = 1; 1214 wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */ 1215 1216 for (i = 1; i <= 3; i++) { 1217 unsigned long timeout = jiffies + HZ / 2; 1218 1219 while (!READ_ONCE(slot[i]) && 1220 time_before(jiffies, timeout)) 1221 ; 1222 1223 if (!time_before(jiffies, timeout)) { 1224 pr_err("%s: rq[%d] timed out\n", 1225 engine->name, i - 1); 1226 err = -ETIME; 1227 goto err; 1228 } 1229 1230 pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]); 1231 } 1232 1233 /* XZY: XZ < XY */ 1234 if (slot[Z] - slot[X] >= slot[Y] - slot[X]) { 1235 pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n", 1236 engine->name, 1237 slot[Z] - slot[X], 1238 slot[Y] - slot[X]); 1239 err = -EINVAL; 1240 } 1241 1242 err: 1243 memset32(&slot[0], -1, 4); 1244 wmb(); 1245 1246 engine->props.timeslice_duration_ms = timeslice; 1247 st_engine_heartbeat_enable(engine); 1248 for (i = 0; i < 3; i++) 1249 i915_request_put(rq[i]); 1250 if (igt_flush_test(gt->i915)) 1251 err = -EIO; 1252 if (err) 1253 return err; 1254 } 1255 1256 return 0; 1257 } 1258 1259 static struct i915_request *nop_request(struct intel_engine_cs *engine) 1260 { 1261 struct i915_request *rq; 1262 1263 rq = intel_engine_create_kernel_request(engine); 1264 if (IS_ERR(rq)) 1265 return rq; 1266 1267 i915_request_get(rq); 1268 i915_request_add(rq); 1269 1270 return rq; 1271 } 1272 1273 static long slice_timeout(struct intel_engine_cs *engine) 1274 { 1275 long timeout; 1276 1277 /* Enough time for a timeslice to kick in, and kick out */ 1278 timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine)); 1279 1280 /* Enough time for the nop request to complete */ 1281 timeout += HZ / 5; 1282 1283 return timeout + 1; 1284 } 1285 1286 static int live_timeslice_queue(void *arg) 1287 { 1288 struct intel_gt *gt = arg; 1289 struct drm_i915_gem_object *obj; 1290 struct intel_engine_cs *engine; 1291 enum intel_engine_id id; 1292 struct i915_vma *vma; 1293 void *vaddr; 1294 int err = 0; 1295 1296 /* 1297 * Make sure that even if ELSP[0] and ELSP[1] are filled with 1298 * timeslicing between them disabled, we *do* enable timeslicing 1299 * if the queue demands it. (Normally, we do not submit if 1300 * ELSP[1] is already occupied, so must rely on timeslicing to 1301 * eject ELSP[0] in favour of the queue.) 
	 */
	if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
		return 0;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_map;

	err = i915_vma_sync(vma);
	if (err)
		goto err_pin;

	for_each_engine(engine, gt, id) {
		struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
		struct i915_request *rq, *nop;

		if (!intel_engine_has_preemption(engine))
			continue;

		st_engine_heartbeat_disable(engine);
		memset(vaddr, 0, PAGE_SIZE);

		/* ELSP[0]: semaphore wait */
		rq = semaphore_queue(engine, vma, 0);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_heartbeat;
		}
		engine->sched_engine->schedule(rq, &attr);
		err = wait_for_submit(engine, rq, HZ / 2);
		if (err) {
			pr_err("%s: Timed out trying to submit semaphores\n",
			       engine->name);
			goto err_rq;
		}

		/* ELSP[1]: nop request */
		nop = nop_request(engine);
		if (IS_ERR(nop)) {
			err = PTR_ERR(nop);
			goto err_rq;
		}
		err = wait_for_submit(engine, nop, HZ / 2);
		i915_request_put(nop);
		if (err) {
			pr_err("%s: Timed out trying to submit nop\n",
			       engine->name);
			goto err_rq;
		}

		GEM_BUG_ON(i915_request_completed(rq));
		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);

		/* Queue: semaphore signal, matching priority as semaphore */
		err = release_queue(engine, vma, 1, effective_prio(rq));
		if (err)
			goto err_rq;

		/* Wait until we ack the release_queue and start timeslicing */
		do {
			cond_resched();
			intel_engine_flush_submission(engine);
		} while (READ_ONCE(engine->execlists.pending[0]));

		/* Timeslice every jiffy, so within 2 we should signal */
		if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
			struct drm_printer p =
				drm_info_printer(gt->i915->drm.dev);

			pr_err("%s: Failed to timeslice into queue\n",
			       engine->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			memset(vaddr, 0xff, PAGE_SIZE);
			err = -EIO;
		}
err_rq:
		i915_request_put(rq);
err_heartbeat:
		st_engine_heartbeat_enable(engine);
		if (err)
			break;
	}

err_pin:
	i915_vma_unpin(vma);
err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

static int live_timeslice_nopreempt(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_spinner spin;
	int err = 0;

	/*
	 * We should not timeslice into a request that is marked with
	 * I915_REQUEST_NOPREEMPT.
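	 * The spinner below has I915_FENCE_FLAG_NOPREEMPT set, so even the
	 * maximum priority barrier queued behind it must wait for the spinner
	 * to be ended by us rather than being timesliced onto the engine.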
1423 */ 1424 if (!CONFIG_DRM_I915_TIMESLICE_DURATION) 1425 return 0; 1426 1427 if (igt_spinner_init(&spin, gt)) 1428 return -ENOMEM; 1429 1430 for_each_engine(engine, gt, id) { 1431 struct intel_context *ce; 1432 struct i915_request *rq; 1433 unsigned long timeslice; 1434 1435 if (!intel_engine_has_preemption(engine)) 1436 continue; 1437 1438 ce = intel_context_create(engine); 1439 if (IS_ERR(ce)) { 1440 err = PTR_ERR(ce); 1441 break; 1442 } 1443 1444 st_engine_heartbeat_disable(engine); 1445 timeslice = xchg(&engine->props.timeslice_duration_ms, 1); 1446 1447 /* Create an unpreemptible spinner */ 1448 1449 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); 1450 intel_context_put(ce); 1451 if (IS_ERR(rq)) { 1452 err = PTR_ERR(rq); 1453 goto out_heartbeat; 1454 } 1455 1456 i915_request_get(rq); 1457 i915_request_add(rq); 1458 1459 if (!igt_wait_for_spinner(&spin, rq)) { 1460 i915_request_put(rq); 1461 err = -ETIME; 1462 goto out_spin; 1463 } 1464 1465 set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); 1466 i915_request_put(rq); 1467 1468 /* Followed by a maximum priority barrier (heartbeat) */ 1469 1470 ce = intel_context_create(engine); 1471 if (IS_ERR(ce)) { 1472 err = PTR_ERR(ce); 1473 goto out_spin; 1474 } 1475 1476 rq = intel_context_create_request(ce); 1477 intel_context_put(ce); 1478 if (IS_ERR(rq)) { 1479 err = PTR_ERR(rq); 1480 goto out_spin; 1481 } 1482 1483 rq->sched.attr.priority = I915_PRIORITY_BARRIER; 1484 i915_request_get(rq); 1485 i915_request_add(rq); 1486 1487 /* 1488 * Wait until the barrier is in ELSP, and we know timeslicing 1489 * will have been activated. 1490 */ 1491 if (wait_for_submit(engine, rq, HZ / 2)) { 1492 i915_request_put(rq); 1493 err = -ETIME; 1494 goto out_spin; 1495 } 1496 1497 /* 1498 * Since the ELSP[0] request is unpreemptible, it should not 1499 * allow the maximum priority barrier through. Wait long 1500 * enough to see if it is timesliced in by mistake. 1501 */ 1502 if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) { 1503 pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n", 1504 engine->name); 1505 err = -EINVAL; 1506 } 1507 i915_request_put(rq); 1508 1509 out_spin: 1510 igt_spinner_end(&spin); 1511 out_heartbeat: 1512 xchg(&engine->props.timeslice_duration_ms, timeslice); 1513 st_engine_heartbeat_enable(engine); 1514 if (err) 1515 break; 1516 1517 if (igt_flush_test(gt->i915)) { 1518 err = -EIO; 1519 break; 1520 } 1521 } 1522 1523 igt_spinner_fini(&spin); 1524 return err; 1525 } 1526 1527 static int live_busywait_preempt(void *arg) 1528 { 1529 struct intel_gt *gt = arg; 1530 struct i915_gem_context *ctx_hi, *ctx_lo; 1531 struct intel_engine_cs *engine; 1532 struct drm_i915_gem_object *obj; 1533 struct i915_vma *vma; 1534 enum intel_engine_id id; 1535 u32 *map; 1536 int err; 1537 1538 /* 1539 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can 1540 * preempt the busywaits used to synchronise between rings. 
	 */

	ctx_hi = kernel_context(gt->i915, NULL);
	if (IS_ERR(ctx_hi))
		return PTR_ERR(ctx_hi);

	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(gt->i915, NULL);
	if (IS_ERR(ctx_lo)) {
		err = PTR_ERR(ctx_lo);
		goto err_ctx_hi;
	}

	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_ctx_lo;
	}

	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_map;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_map;

	err = i915_vma_sync(vma);
	if (err)
		goto err_vma;

	for_each_engine(engine, gt, id) {
		struct i915_request *lo, *hi;
		struct igt_live_test t;
		u32 *cs;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
			err = -EIO;
			goto err_vma;
		}

		/*
		 * We create two requests. The low priority request
		 * busywaits on a semaphore (inside the ringbuffer where
		 * it should be preemptible) and the high priority request
		 * uses a MI_STORE_DWORD_IMM to update the semaphore value
		 * allowing the first request to complete. If preemption
		 * fails, we hang instead.
		 */

		lo = igt_request_alloc(ctx_lo, engine);
		if (IS_ERR(lo)) {
			err = PTR_ERR(lo);
			goto err_vma;
		}

		cs = intel_ring_begin(lo, 8);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(lo);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 1;

		/* XXX Do we need a flush + invalidate here?
*/ 1627 1628 *cs++ = MI_SEMAPHORE_WAIT | 1629 MI_SEMAPHORE_GLOBAL_GTT | 1630 MI_SEMAPHORE_POLL | 1631 MI_SEMAPHORE_SAD_EQ_SDD; 1632 *cs++ = 0; 1633 *cs++ = i915_ggtt_offset(vma); 1634 *cs++ = 0; 1635 1636 intel_ring_advance(lo, cs); 1637 1638 i915_request_get(lo); 1639 i915_request_add(lo); 1640 1641 if (wait_for(READ_ONCE(*map), 10)) { 1642 i915_request_put(lo); 1643 err = -ETIMEDOUT; 1644 goto err_vma; 1645 } 1646 1647 /* Low priority request should be busywaiting now */ 1648 if (i915_request_wait(lo, 0, 1) != -ETIME) { 1649 i915_request_put(lo); 1650 pr_err("%s: Busywaiting request did not!\n", 1651 engine->name); 1652 err = -EIO; 1653 goto err_vma; 1654 } 1655 1656 hi = igt_request_alloc(ctx_hi, engine); 1657 if (IS_ERR(hi)) { 1658 err = PTR_ERR(hi); 1659 i915_request_put(lo); 1660 goto err_vma; 1661 } 1662 1663 cs = intel_ring_begin(hi, 4); 1664 if (IS_ERR(cs)) { 1665 err = PTR_ERR(cs); 1666 i915_request_add(hi); 1667 i915_request_put(lo); 1668 goto err_vma; 1669 } 1670 1671 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 1672 *cs++ = i915_ggtt_offset(vma); 1673 *cs++ = 0; 1674 *cs++ = 0; 1675 1676 intel_ring_advance(hi, cs); 1677 i915_request_add(hi); 1678 1679 if (i915_request_wait(lo, 0, HZ / 5) < 0) { 1680 struct drm_printer p = drm_info_printer(gt->i915->drm.dev); 1681 1682 pr_err("%s: Failed to preempt semaphore busywait!\n", 1683 engine->name); 1684 1685 intel_engine_dump(engine, &p, "%s\n", engine->name); 1686 GEM_TRACE_DUMP(); 1687 1688 i915_request_put(lo); 1689 intel_gt_set_wedged(gt); 1690 err = -EIO; 1691 goto err_vma; 1692 } 1693 GEM_BUG_ON(READ_ONCE(*map)); 1694 i915_request_put(lo); 1695 1696 if (igt_live_test_end(&t)) { 1697 err = -EIO; 1698 goto err_vma; 1699 } 1700 } 1701 1702 err = 0; 1703 err_vma: 1704 i915_vma_unpin(vma); 1705 err_map: 1706 i915_gem_object_unpin_map(obj); 1707 err_obj: 1708 i915_gem_object_put(obj); 1709 err_ctx_lo: 1710 kernel_context_close(ctx_lo); 1711 err_ctx_hi: 1712 kernel_context_close(ctx_hi); 1713 return err; 1714 } 1715 1716 static struct i915_request * 1717 spinner_create_request(struct igt_spinner *spin, 1718 struct i915_gem_context *ctx, 1719 struct intel_engine_cs *engine, 1720 u32 arb) 1721 { 1722 struct intel_context *ce; 1723 struct i915_request *rq; 1724 1725 ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); 1726 if (IS_ERR(ce)) 1727 return ERR_CAST(ce); 1728 1729 rq = igt_spinner_create_request(spin, ce, arb); 1730 intel_context_put(ce); 1731 return rq; 1732 } 1733 1734 static int live_preempt(void *arg) 1735 { 1736 struct intel_gt *gt = arg; 1737 struct i915_gem_context *ctx_hi, *ctx_lo; 1738 struct igt_spinner spin_hi, spin_lo; 1739 struct intel_engine_cs *engine; 1740 enum intel_engine_id id; 1741 int err = -ENOMEM; 1742 1743 ctx_hi = kernel_context(gt->i915, NULL); 1744 if (!ctx_hi) 1745 return -ENOMEM; 1746 ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; 1747 1748 ctx_lo = kernel_context(gt->i915, NULL); 1749 if (!ctx_lo) 1750 goto err_ctx_hi; 1751 ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; 1752 1753 if (igt_spinner_init(&spin_hi, gt)) 1754 goto err_ctx_lo; 1755 1756 if (igt_spinner_init(&spin_lo, gt)) 1757 goto err_spin_hi; 1758 1759 for_each_engine(engine, gt, id) { 1760 struct igt_live_test t; 1761 struct i915_request *rq; 1762 1763 if (!intel_engine_has_preemption(engine)) 1764 continue; 1765 1766 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { 1767 err = -EIO; 1768 goto err_spin_lo; 1769 } 1770 1771 rq = spinner_create_request(&spin_lo, ctx_lo, engine, 1772 
MI_ARB_CHECK); 1773 if (IS_ERR(rq)) { 1774 err = PTR_ERR(rq); 1775 goto err_spin_lo; 1776 } 1777 1778 i915_request_add(rq); 1779 if (!igt_wait_for_spinner(&spin_lo, rq)) { 1780 GEM_TRACE("lo spinner failed to start\n"); 1781 GEM_TRACE_DUMP(); 1782 intel_gt_set_wedged(gt); 1783 err = -EIO; 1784 goto err_spin_lo; 1785 } 1786 1787 rq = spinner_create_request(&spin_hi, ctx_hi, engine, 1788 MI_ARB_CHECK); 1789 if (IS_ERR(rq)) { 1790 igt_spinner_end(&spin_lo); 1791 err = PTR_ERR(rq); 1792 goto err_spin_lo; 1793 } 1794 1795 i915_request_add(rq); 1796 if (!igt_wait_for_spinner(&spin_hi, rq)) { 1797 GEM_TRACE("hi spinner failed to start\n"); 1798 GEM_TRACE_DUMP(); 1799 intel_gt_set_wedged(gt); 1800 err = -EIO; 1801 goto err_spin_lo; 1802 } 1803 1804 igt_spinner_end(&spin_hi); 1805 igt_spinner_end(&spin_lo); 1806 1807 if (igt_live_test_end(&t)) { 1808 err = -EIO; 1809 goto err_spin_lo; 1810 } 1811 } 1812 1813 err = 0; 1814 err_spin_lo: 1815 igt_spinner_fini(&spin_lo); 1816 err_spin_hi: 1817 igt_spinner_fini(&spin_hi); 1818 err_ctx_lo: 1819 kernel_context_close(ctx_lo); 1820 err_ctx_hi: 1821 kernel_context_close(ctx_hi); 1822 return err; 1823 } 1824 1825 static int live_late_preempt(void *arg) 1826 { 1827 struct intel_gt *gt = arg; 1828 struct i915_gem_context *ctx_hi, *ctx_lo; 1829 struct igt_spinner spin_hi, spin_lo; 1830 struct intel_engine_cs *engine; 1831 struct i915_sched_attr attr = {}; 1832 enum intel_engine_id id; 1833 int err = -ENOMEM; 1834 1835 ctx_hi = kernel_context(gt->i915, NULL); 1836 if (!ctx_hi) 1837 return -ENOMEM; 1838 1839 ctx_lo = kernel_context(gt->i915, NULL); 1840 if (!ctx_lo) 1841 goto err_ctx_hi; 1842 1843 if (igt_spinner_init(&spin_hi, gt)) 1844 goto err_ctx_lo; 1845 1846 if (igt_spinner_init(&spin_lo, gt)) 1847 goto err_spin_hi; 1848 1849 /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. 
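	 * (Neither context is given an elevated priority up front; bumping
	 * ctx_lo to 1 ensures the nominally high priority spinner cannot
	 * overtake it until we explicitly reprioritise its request to
	 * I915_PRIORITY_MAX below.)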
*/ 1850 ctx_lo->sched.priority = 1; 1851 1852 for_each_engine(engine, gt, id) { 1853 struct igt_live_test t; 1854 struct i915_request *rq; 1855 1856 if (!intel_engine_has_preemption(engine)) 1857 continue; 1858 1859 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { 1860 err = -EIO; 1861 goto err_spin_lo; 1862 } 1863 1864 rq = spinner_create_request(&spin_lo, ctx_lo, engine, 1865 MI_ARB_CHECK); 1866 if (IS_ERR(rq)) { 1867 err = PTR_ERR(rq); 1868 goto err_spin_lo; 1869 } 1870 1871 i915_request_add(rq); 1872 if (!igt_wait_for_spinner(&spin_lo, rq)) { 1873 pr_err("First context failed to start\n"); 1874 goto err_wedged; 1875 } 1876 1877 rq = spinner_create_request(&spin_hi, ctx_hi, engine, 1878 MI_NOOP); 1879 if (IS_ERR(rq)) { 1880 igt_spinner_end(&spin_lo); 1881 err = PTR_ERR(rq); 1882 goto err_spin_lo; 1883 } 1884 1885 i915_request_add(rq); 1886 if (igt_wait_for_spinner(&spin_hi, rq)) { 1887 pr_err("Second context overtook first?\n"); 1888 goto err_wedged; 1889 } 1890 1891 attr.priority = I915_PRIORITY_MAX; 1892 engine->sched_engine->schedule(rq, &attr); 1893 1894 if (!igt_wait_for_spinner(&spin_hi, rq)) { 1895 pr_err("High priority context failed to preempt the low priority context\n"); 1896 GEM_TRACE_DUMP(); 1897 goto err_wedged; 1898 } 1899 1900 igt_spinner_end(&spin_hi); 1901 igt_spinner_end(&spin_lo); 1902 1903 if (igt_live_test_end(&t)) { 1904 err = -EIO; 1905 goto err_spin_lo; 1906 } 1907 } 1908 1909 err = 0; 1910 err_spin_lo: 1911 igt_spinner_fini(&spin_lo); 1912 err_spin_hi: 1913 igt_spinner_fini(&spin_hi); 1914 err_ctx_lo: 1915 kernel_context_close(ctx_lo); 1916 err_ctx_hi: 1917 kernel_context_close(ctx_hi); 1918 return err; 1919 1920 err_wedged: 1921 igt_spinner_end(&spin_hi); 1922 igt_spinner_end(&spin_lo); 1923 intel_gt_set_wedged(gt); 1924 err = -EIO; 1925 goto err_spin_lo; 1926 } 1927 1928 struct preempt_client { 1929 struct igt_spinner spin; 1930 struct i915_gem_context *ctx; 1931 }; 1932 1933 static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c) 1934 { 1935 c->ctx = kernel_context(gt->i915, NULL); 1936 if (!c->ctx) 1937 return -ENOMEM; 1938 1939 if (igt_spinner_init(&c->spin, gt)) 1940 goto err_ctx; 1941 1942 return 0; 1943 1944 err_ctx: 1945 kernel_context_close(c->ctx); 1946 return -ENOMEM; 1947 } 1948 1949 static void preempt_client_fini(struct preempt_client *c) 1950 { 1951 igt_spinner_fini(&c->spin); 1952 kernel_context_close(c->ctx); 1953 } 1954 1955 static int live_nopreempt(void *arg) 1956 { 1957 struct intel_gt *gt = arg; 1958 struct intel_engine_cs *engine; 1959 struct preempt_client a, b; 1960 enum intel_engine_id id; 1961 int err = -ENOMEM; 1962 1963 /* 1964 * Verify that we can disable preemption for an individual request 1965 * that may be being observed and not want to be interrupted. 1966 */ 1967 1968 if (preempt_client_init(gt, &a)) 1969 return -ENOMEM; 1970 if (preempt_client_init(gt, &b)) 1971 goto err_client_a; 1972 b.ctx->sched.priority = I915_PRIORITY_MAX; 1973 1974 for_each_engine(engine, gt, id) { 1975 struct i915_request *rq_a, *rq_b; 1976 1977 if (!intel_engine_has_preemption(engine)) 1978 continue; 1979 1980 engine->execlists.preempt_hang.count = 0; 1981 1982 rq_a = spinner_create_request(&a.spin, 1983 a.ctx, engine, 1984 MI_ARB_CHECK); 1985 if (IS_ERR(rq_a)) { 1986 err = PTR_ERR(rq_a); 1987 goto err_client_b; 1988 } 1989 1990 /* Low priority client, but unpreemptable! 
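		 * Setting I915_FENCE_FLAG_NOPREEMPT below means that rq_b,
		 * despite its maximum priority, must wait for us to end the
		 * spinner rather than preempting it.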
*/ 1991 __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags); 1992 1993 i915_request_add(rq_a); 1994 if (!igt_wait_for_spinner(&a.spin, rq_a)) { 1995 pr_err("First client failed to start\n"); 1996 goto err_wedged; 1997 } 1998 1999 rq_b = spinner_create_request(&b.spin, 2000 b.ctx, engine, 2001 MI_ARB_CHECK); 2002 if (IS_ERR(rq_b)) { 2003 err = PTR_ERR(rq_b); 2004 goto err_client_b; 2005 } 2006 2007 i915_request_add(rq_b); 2008 2009 /* B is much more important than A! (But A is unpreemptable.) */ 2010 GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a)); 2011 2012 /* Wait long enough for preemption and timeslicing */ 2013 if (igt_wait_for_spinner(&b.spin, rq_b)) { 2014 pr_err("Second client started too early!\n"); 2015 goto err_wedged; 2016 } 2017 2018 igt_spinner_end(&a.spin); 2019 2020 if (!igt_wait_for_spinner(&b.spin, rq_b)) { 2021 pr_err("Second client failed to start\n"); 2022 goto err_wedged; 2023 } 2024 2025 igt_spinner_end(&b.spin); 2026 2027 if (engine->execlists.preempt_hang.count) { 2028 pr_err("Preemption recorded x%d; should have been suppressed!\n", 2029 engine->execlists.preempt_hang.count); 2030 err = -EINVAL; 2031 goto err_wedged; 2032 } 2033 2034 if (igt_flush_test(gt->i915)) 2035 goto err_wedged; 2036 } 2037 2038 err = 0; 2039 err_client_b: 2040 preempt_client_fini(&b); 2041 err_client_a: 2042 preempt_client_fini(&a); 2043 return err; 2044 2045 err_wedged: 2046 igt_spinner_end(&b.spin); 2047 igt_spinner_end(&a.spin); 2048 intel_gt_set_wedged(gt); 2049 err = -EIO; 2050 goto err_client_b; 2051 } 2052 2053 struct live_preempt_cancel { 2054 struct intel_engine_cs *engine; 2055 struct preempt_client a, b; 2056 }; 2057 2058 static int __cancel_active0(struct live_preempt_cancel *arg) 2059 { 2060 struct i915_request *rq; 2061 struct igt_live_test t; 2062 int err; 2063 2064 /* Preempt cancel of ELSP0 */ 2065 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); 2066 if (igt_live_test_begin(&t, arg->engine->i915, 2067 __func__, arg->engine->name)) 2068 return -EIO; 2069 2070 rq = spinner_create_request(&arg->a.spin, 2071 arg->a.ctx, arg->engine, 2072 MI_ARB_CHECK); 2073 if (IS_ERR(rq)) 2074 return PTR_ERR(rq); 2075 2076 clear_bit(CONTEXT_BANNED, &rq->context->flags); 2077 i915_request_get(rq); 2078 i915_request_add(rq); 2079 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { 2080 err = -EIO; 2081 goto out; 2082 } 2083 2084 intel_context_ban(rq->context, rq); 2085 err = intel_engine_pulse(arg->engine); 2086 if (err) 2087 goto out; 2088 2089 err = wait_for_reset(arg->engine, rq, HZ / 2); 2090 if (err) { 2091 pr_err("Cancelled inflight0 request did not reset\n"); 2092 goto out; 2093 } 2094 2095 out: 2096 i915_request_put(rq); 2097 if (igt_live_test_end(&t)) 2098 err = -EIO; 2099 return err; 2100 } 2101 2102 static int __cancel_active1(struct live_preempt_cancel *arg) 2103 { 2104 struct i915_request *rq[2] = {}; 2105 struct igt_live_test t; 2106 int err; 2107 2108 /* Preempt cancel of ELSP1 */ 2109 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); 2110 if (igt_live_test_begin(&t, arg->engine->i915, 2111 __func__, arg->engine->name)) 2112 return -EIO; 2113 2114 rq[0] = spinner_create_request(&arg->a.spin, 2115 arg->a.ctx, arg->engine, 2116 MI_NOOP); /* no preemption */ 2117 if (IS_ERR(rq[0])) 2118 return PTR_ERR(rq[0]); 2119 2120 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); 2121 i915_request_get(rq[0]); 2122 i915_request_add(rq[0]); 2123 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { 2124 err = -EIO; 2125 goto out; 2126 } 2127 2128 rq[1] = spinner_create_request(&arg->b.spin, 2129 
arg->b.ctx, arg->engine, 2130 MI_ARB_CHECK); 2131 if (IS_ERR(rq[1])) { 2132 err = PTR_ERR(rq[1]); 2133 goto out; 2134 } 2135 2136 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); 2137 i915_request_get(rq[1]); 2138 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); 2139 i915_request_add(rq[1]); 2140 if (err) 2141 goto out; 2142 2143 intel_context_ban(rq[1]->context, rq[1]); 2144 err = intel_engine_pulse(arg->engine); 2145 if (err) 2146 goto out; 2147 2148 igt_spinner_end(&arg->a.spin); 2149 err = wait_for_reset(arg->engine, rq[1], HZ / 2); 2150 if (err) 2151 goto out; 2152 2153 if (rq[0]->fence.error != 0) { 2154 pr_err("Normal inflight0 request did not complete\n"); 2155 err = -EINVAL; 2156 goto out; 2157 } 2158 2159 if (rq[1]->fence.error != -EIO) { 2160 pr_err("Cancelled inflight1 request did not report -EIO\n"); 2161 err = -EINVAL; 2162 goto out; 2163 } 2164 2165 out: 2166 i915_request_put(rq[1]); 2167 i915_request_put(rq[0]); 2168 if (igt_live_test_end(&t)) 2169 err = -EIO; 2170 return err; 2171 } 2172 2173 static int __cancel_queued(struct live_preempt_cancel *arg) 2174 { 2175 struct i915_request *rq[3] = {}; 2176 struct igt_live_test t; 2177 int err; 2178 2179 /* Full ELSP and one in the wings */ 2180 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); 2181 if (igt_live_test_begin(&t, arg->engine->i915, 2182 __func__, arg->engine->name)) 2183 return -EIO; 2184 2185 rq[0] = spinner_create_request(&arg->a.spin, 2186 arg->a.ctx, arg->engine, 2187 MI_ARB_CHECK); 2188 if (IS_ERR(rq[0])) 2189 return PTR_ERR(rq[0]); 2190 2191 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); 2192 i915_request_get(rq[0]); 2193 i915_request_add(rq[0]); 2194 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { 2195 err = -EIO; 2196 goto out; 2197 } 2198 2199 rq[1] = igt_request_alloc(arg->b.ctx, arg->engine); 2200 if (IS_ERR(rq[1])) { 2201 err = PTR_ERR(rq[1]); 2202 goto out; 2203 } 2204 2205 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); 2206 i915_request_get(rq[1]); 2207 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); 2208 i915_request_add(rq[1]); 2209 if (err) 2210 goto out; 2211 2212 rq[2] = spinner_create_request(&arg->b.spin, 2213 arg->a.ctx, arg->engine, 2214 MI_ARB_CHECK); 2215 if (IS_ERR(rq[2])) { 2216 err = PTR_ERR(rq[2]); 2217 goto out; 2218 } 2219 2220 i915_request_get(rq[2]); 2221 err = i915_request_await_dma_fence(rq[2], &rq[1]->fence); 2222 i915_request_add(rq[2]); 2223 if (err) 2224 goto out; 2225 2226 intel_context_ban(rq[2]->context, rq[2]); 2227 err = intel_engine_pulse(arg->engine); 2228 if (err) 2229 goto out; 2230 2231 err = wait_for_reset(arg->engine, rq[2], HZ / 2); 2232 if (err) 2233 goto out; 2234 2235 if (rq[0]->fence.error != -EIO) { 2236 pr_err("Cancelled inflight0 request did not report -EIO\n"); 2237 err = -EINVAL; 2238 goto out; 2239 } 2240 2241 /* 2242 * The behavior between having semaphores and not is different. With 2243 * semaphores the subsequent request is on the hardware and not cancelled 2244 * while without the request is held in the driver and cancelled. 
2245 */ 2246 if (intel_engine_has_semaphores(rq[1]->engine) && 2247 rq[1]->fence.error != 0) { 2248 pr_err("Normal inflight1 request did not complete\n"); 2249 err = -EINVAL; 2250 goto out; 2251 } 2252 2253 if (rq[2]->fence.error != -EIO) { 2254 pr_err("Cancelled queued request did not report -EIO\n"); 2255 err = -EINVAL; 2256 goto out; 2257 } 2258 2259 out: 2260 i915_request_put(rq[2]); 2261 i915_request_put(rq[1]); 2262 i915_request_put(rq[0]); 2263 if (igt_live_test_end(&t)) 2264 err = -EIO; 2265 return err; 2266 } 2267 2268 static int __cancel_hostile(struct live_preempt_cancel *arg) 2269 { 2270 struct i915_request *rq; 2271 int err; 2272 2273 /* Preempt cancel non-preemptible spinner in ELSP0 */ 2274 if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) 2275 return 0; 2276 2277 if (!intel_has_reset_engine(arg->engine->gt)) 2278 return 0; 2279 2280 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); 2281 rq = spinner_create_request(&arg->a.spin, 2282 arg->a.ctx, arg->engine, 2283 MI_NOOP); /* preemption disabled */ 2284 if (IS_ERR(rq)) 2285 return PTR_ERR(rq); 2286 2287 clear_bit(CONTEXT_BANNED, &rq->context->flags); 2288 i915_request_get(rq); 2289 i915_request_add(rq); 2290 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { 2291 err = -EIO; 2292 goto out; 2293 } 2294 2295 intel_context_ban(rq->context, rq); 2296 err = intel_engine_pulse(arg->engine); /* force reset */ 2297 if (err) 2298 goto out; 2299 2300 err = wait_for_reset(arg->engine, rq, HZ / 2); 2301 if (err) { 2302 pr_err("Cancelled inflight0 request did not reset\n"); 2303 goto out; 2304 } 2305 2306 out: 2307 i915_request_put(rq); 2308 if (igt_flush_test(arg->engine->i915)) 2309 err = -EIO; 2310 return err; 2311 } 2312 2313 static void force_reset_timeout(struct intel_engine_cs *engine) 2314 { 2315 engine->reset_timeout.probability = 999; 2316 atomic_set(&engine->reset_timeout.times, -1); 2317 } 2318 2319 static void cancel_reset_timeout(struct intel_engine_cs *engine) 2320 { 2321 memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout)); 2322 } 2323 2324 static int __cancel_fail(struct live_preempt_cancel *arg) 2325 { 2326 struct intel_engine_cs *engine = arg->engine; 2327 struct i915_request *rq; 2328 int err; 2329 2330 if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) 2331 return 0; 2332 2333 if (!intel_has_reset_engine(engine->gt)) 2334 return 0; 2335 2336 GEM_TRACE("%s(%s)\n", __func__, engine->name); 2337 rq = spinner_create_request(&arg->a.spin, 2338 arg->a.ctx, engine, 2339 MI_NOOP); /* preemption disabled */ 2340 if (IS_ERR(rq)) 2341 return PTR_ERR(rq); 2342 2343 clear_bit(CONTEXT_BANNED, &rq->context->flags); 2344 i915_request_get(rq); 2345 i915_request_add(rq); 2346 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { 2347 err = -EIO; 2348 goto out; 2349 } 2350 2351 intel_context_set_banned(rq->context); 2352 2353 err = intel_engine_pulse(engine); 2354 if (err) 2355 goto out; 2356 2357 force_reset_timeout(engine); 2358 2359 /* force preempt reset [failure] */ 2360 while (!engine->execlists.pending[0]) 2361 intel_engine_flush_submission(engine); 2362 timer_delete_sync(&engine->execlists.preempt); 2363 intel_engine_flush_submission(engine); 2364 2365 cancel_reset_timeout(engine); 2366 2367 /* after failure, require heartbeats to reset device */ 2368 intel_engine_set_heartbeat(engine, 1); 2369 err = wait_for_reset(engine, rq, HZ / 2); 2370 intel_engine_set_heartbeat(engine, 2371 engine->defaults.heartbeat_interval_ms); 2372 if (err) { 2373 pr_err("Cancelled inflight0 request did not reset\n"); 2374 goto out; 2375 } 2376 2377 out: 2378 
i915_request_put(rq); 2379 if (igt_flush_test(engine->i915)) 2380 err = -EIO; 2381 return err; 2382 } 2383 2384 static int live_preempt_cancel(void *arg) 2385 { 2386 struct intel_gt *gt = arg; 2387 struct live_preempt_cancel data; 2388 enum intel_engine_id id; 2389 int err = -ENOMEM; 2390 2391 /* 2392 * To cancel an inflight context, we need to first remove it from the 2393 * GPU. That sounds like preemption! Plus a little bit of bookkeeping. 2394 */ 2395 2396 if (preempt_client_init(gt, &data.a)) 2397 return -ENOMEM; 2398 if (preempt_client_init(gt, &data.b)) 2399 goto err_client_a; 2400 2401 for_each_engine(data.engine, gt, id) { 2402 if (!intel_engine_has_preemption(data.engine)) 2403 continue; 2404 2405 err = __cancel_active0(&data); 2406 if (err) 2407 goto err_wedged; 2408 2409 err = __cancel_active1(&data); 2410 if (err) 2411 goto err_wedged; 2412 2413 err = __cancel_queued(&data); 2414 if (err) 2415 goto err_wedged; 2416 2417 err = __cancel_hostile(&data); 2418 if (err) 2419 goto err_wedged; 2420 2421 err = __cancel_fail(&data); 2422 if (err) 2423 goto err_wedged; 2424 } 2425 2426 err = 0; 2427 err_client_b: 2428 preempt_client_fini(&data.b); 2429 err_client_a: 2430 preempt_client_fini(&data.a); 2431 return err; 2432 2433 err_wedged: 2434 GEM_TRACE_DUMP(); 2435 igt_spinner_end(&data.b.spin); 2436 igt_spinner_end(&data.a.spin); 2437 intel_gt_set_wedged(gt); 2438 goto err_client_b; 2439 } 2440 2441 static int live_suppress_self_preempt(void *arg) 2442 { 2443 struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX }; 2444 struct intel_gt *gt = arg; 2445 struct intel_engine_cs *engine; 2446 struct preempt_client a, b; 2447 enum intel_engine_id id; 2448 int err = -ENOMEM; 2449 2450 /* 2451 * Verify that if a preemption request does not cause a change in 2452 * the current execution order, the preempt-to-idle injection is 2453 * skipped and that we do not accidentally apply it after the CS 2454 * completion event. 
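 * Each iteration bumps the priority of the already-running spinner, so
 * no preempt-to-idle cycle should be recorded in preempt_hang.count.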
2455 */ 2456 2457 if (intel_uc_uses_guc_submission(&gt->uc)) 2458 return 0; /* presume black box */ 2459 2460 if (intel_vgpu_active(gt->i915)) 2461 return 0; /* GVT forces single port & request submission */ 2462 2463 if (preempt_client_init(gt, &a)) 2464 return -ENOMEM; 2465 if (preempt_client_init(gt, &b)) 2466 goto err_client_a; 2467 2468 for_each_engine(engine, gt, id) { 2469 struct i915_request *rq_a, *rq_b; 2470 int depth; 2471 2472 if (!intel_engine_has_preemption(engine)) 2473 continue; 2474 2475 if (igt_flush_test(gt->i915)) 2476 goto err_wedged; 2477 2478 st_engine_heartbeat_disable(engine); 2479 engine->execlists.preempt_hang.count = 0; 2480 2481 rq_a = spinner_create_request(&a.spin, 2482 a.ctx, engine, 2483 MI_NOOP); 2484 if (IS_ERR(rq_a)) { 2485 err = PTR_ERR(rq_a); 2486 st_engine_heartbeat_enable(engine); 2487 goto err_client_b; 2488 } 2489 2490 i915_request_add(rq_a); 2491 if (!igt_wait_for_spinner(&a.spin, rq_a)) { 2492 pr_err("First client failed to start\n"); 2493 st_engine_heartbeat_enable(engine); 2494 goto err_wedged; 2495 } 2496 2497 /* Keep postponing the timer to avoid premature slicing */ 2498 mod_timer(&engine->execlists.timer, jiffies + HZ); 2499 for (depth = 0; depth < 8; depth++) { 2500 rq_b = spinner_create_request(&b.spin, 2501 b.ctx, engine, 2502 MI_NOOP); 2503 if (IS_ERR(rq_b)) { 2504 err = PTR_ERR(rq_b); 2505 st_engine_heartbeat_enable(engine); 2506 goto err_client_b; 2507 } 2508 i915_request_add(rq_b); 2509 2510 GEM_BUG_ON(i915_request_completed(rq_a)); 2511 engine->sched_engine->schedule(rq_a, &attr); 2512 igt_spinner_end(&a.spin); 2513 2514 if (!igt_wait_for_spinner(&b.spin, rq_b)) { 2515 pr_err("Second client failed to start\n"); 2516 st_engine_heartbeat_enable(engine); 2517 goto err_wedged; 2518 } 2519 2520 swap(a, b); 2521 rq_a = rq_b; 2522 } 2523 igt_spinner_end(&a.spin); 2524 2525 if (engine->execlists.preempt_hang.count) { 2526 pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n", 2527 engine->name, 2528 engine->execlists.preempt_hang.count, 2529 depth); 2530 st_engine_heartbeat_enable(engine); 2531 err = -EINVAL; 2532 goto err_client_b; 2533 } 2534 2535 st_engine_heartbeat_enable(engine); 2536 if (igt_flush_test(gt->i915)) 2537 goto err_wedged; 2538 } 2539 2540 err = 0; 2541 err_client_b: 2542 preempt_client_fini(&b); 2543 err_client_a: 2544 preempt_client_fini(&a); 2545 return err; 2546 2547 err_wedged: 2548 igt_spinner_end(&b.spin); 2549 igt_spinner_end(&a.spin); 2550 intel_gt_set_wedged(gt); 2551 err = -EIO; 2552 goto err_client_b; 2553 } 2554 2555 static int live_chain_preempt(void *arg) 2556 { 2557 struct intel_gt *gt = arg; 2558 struct intel_engine_cs *engine; 2559 struct preempt_client hi, lo; 2560 enum intel_engine_id id; 2561 int err = -ENOMEM; 2562 2563 /* 2564 * Build a chain AB...BA between two contexts (A, B) and request 2565 * preemption of the last request. It should then complete before 2566 * the previously submitted spinner in B.
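 * The chain length is scaled up to the number of requests that fit in
 * the ring, estimated from the size of the first spinner request.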
2567 */ 2568 2569 if (preempt_client_init(gt, &hi)) 2570 return -ENOMEM; 2571 2572 if (preempt_client_init(gt, &lo)) 2573 goto err_client_hi; 2574 2575 for_each_engine(engine, gt, id) { 2576 struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX }; 2577 struct igt_live_test t; 2578 struct i915_request *rq; 2579 int ring_size, count, i; 2580 2581 if (!intel_engine_has_preemption(engine)) 2582 continue; 2583 2584 rq = spinner_create_request(&lo.spin, 2585 lo.ctx, engine, 2586 MI_ARB_CHECK); 2587 if (IS_ERR(rq)) 2588 goto err_wedged; 2589 2590 i915_request_get(rq); 2591 i915_request_add(rq); 2592 2593 ring_size = rq->wa_tail - rq->head; 2594 if (ring_size < 0) 2595 ring_size += rq->ring->size; 2596 ring_size = rq->ring->size / ring_size; 2597 pr_debug("%s(%s): Using maximum of %d requests\n", 2598 __func__, engine->name, ring_size); 2599 2600 igt_spinner_end(&lo.spin); 2601 if (i915_request_wait(rq, 0, HZ / 2) < 0) { 2602 pr_err("Timed out waiting to flush %s\n", engine->name); 2603 i915_request_put(rq); 2604 goto err_wedged; 2605 } 2606 i915_request_put(rq); 2607 2608 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { 2609 err = -EIO; 2610 goto err_wedged; 2611 } 2612 2613 for_each_prime_number_from(count, 1, ring_size) { 2614 rq = spinner_create_request(&hi.spin, 2615 hi.ctx, engine, 2616 MI_ARB_CHECK); 2617 if (IS_ERR(rq)) 2618 goto err_wedged; 2619 i915_request_add(rq); 2620 if (!igt_wait_for_spinner(&hi.spin, rq)) 2621 goto err_wedged; 2622 2623 rq = spinner_create_request(&lo.spin, 2624 lo.ctx, engine, 2625 MI_ARB_CHECK); 2626 if (IS_ERR(rq)) 2627 goto err_wedged; 2628 i915_request_add(rq); 2629 2630 for (i = 0; i < count; i++) { 2631 rq = igt_request_alloc(lo.ctx, engine); 2632 if (IS_ERR(rq)) 2633 goto err_wedged; 2634 i915_request_add(rq); 2635 } 2636 2637 rq = igt_request_alloc(hi.ctx, engine); 2638 if (IS_ERR(rq)) 2639 goto err_wedged; 2640 2641 i915_request_get(rq); 2642 i915_request_add(rq); 2643 engine->sched_engine->schedule(rq, &attr); 2644 2645 igt_spinner_end(&hi.spin); 2646 if (i915_request_wait(rq, 0, HZ / 5) < 0) { 2647 struct drm_printer p = 2648 drm_info_printer(gt->i915->drm.dev); 2649 2650 pr_err("Failed to preempt over chain of %d\n", 2651 count); 2652 intel_engine_dump(engine, &p, 2653 "%s\n", engine->name); 2654 i915_request_put(rq); 2655 goto err_wedged; 2656 } 2657 igt_spinner_end(&lo.spin); 2658 i915_request_put(rq); 2659 2660 rq = igt_request_alloc(lo.ctx, engine); 2661 if (IS_ERR(rq)) 2662 goto err_wedged; 2663 2664 i915_request_get(rq); 2665 i915_request_add(rq); 2666 2667 if (i915_request_wait(rq, 0, HZ / 5) < 0) { 2668 struct drm_printer p = 2669 drm_info_printer(gt->i915->drm.dev); 2670 2671 pr_err("Failed to flush low priority chain of %d requests\n", 2672 count); 2673 intel_engine_dump(engine, &p, 2674 "%s\n", engine->name); 2675 2676 i915_request_put(rq); 2677 goto err_wedged; 2678 } 2679 i915_request_put(rq); 2680 } 2681 2682 if (igt_live_test_end(&t)) { 2683 err = -EIO; 2684 goto err_wedged; 2685 } 2686 } 2687 2688 err = 0; 2689 err_client_lo: 2690 preempt_client_fini(&lo); 2691 err_client_hi: 2692 preempt_client_fini(&hi); 2693 return err; 2694 2695 err_wedged: 2696 igt_spinner_end(&hi.spin); 2697 igt_spinner_end(&lo.spin); 2698 intel_gt_set_wedged(gt); 2699 err = -EIO; 2700 goto err_client_lo; 2701 } 2702 2703 static int create_gang(struct intel_engine_cs *engine, 2704 struct i915_request **prev) 2705 { 2706 struct drm_i915_gem_object *obj; 2707 struct intel_context *ce; 2708 struct i915_request *rq; 2709 struct i915_vma *vma; 
2710 u32 *cs; 2711 int err; 2712 2713 ce = intel_context_create(engine); 2714 if (IS_ERR(ce)) 2715 return PTR_ERR(ce); 2716 2717 obj = i915_gem_object_create_internal(engine->i915, 4096); 2718 if (IS_ERR(obj)) { 2719 err = PTR_ERR(obj); 2720 goto err_ce; 2721 } 2722 2723 vma = i915_vma_instance(obj, ce->vm, NULL); 2724 if (IS_ERR(vma)) { 2725 err = PTR_ERR(vma); 2726 goto err_obj; 2727 } 2728 2729 err = i915_vma_pin(vma, 0, 0, PIN_USER); 2730 if (err) 2731 goto err_obj; 2732 2733 cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); 2734 if (IS_ERR(cs)) { 2735 err = PTR_ERR(cs); 2736 goto err_obj; 2737 } 2738 2739 /* Semaphore target: spin until zero */ 2740 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 2741 2742 *cs++ = MI_SEMAPHORE_WAIT | 2743 MI_SEMAPHORE_POLL | 2744 MI_SEMAPHORE_SAD_EQ_SDD; 2745 *cs++ = 0; 2746 *cs++ = lower_32_bits(i915_vma_offset(vma)); 2747 *cs++ = upper_32_bits(i915_vma_offset(vma)); 2748 2749 if (*prev) { 2750 u64 offset = i915_vma_offset((*prev)->batch); 2751 2752 /* Terminate the spinner in the next lower priority batch. */ 2753 *cs++ = MI_STORE_DWORD_IMM_GEN4; 2754 *cs++ = lower_32_bits(offset); 2755 *cs++ = upper_32_bits(offset); 2756 *cs++ = 0; 2757 } 2758 2759 *cs++ = MI_BATCH_BUFFER_END; 2760 i915_gem_object_flush_map(obj); 2761 i915_gem_object_unpin_map(obj); 2762 2763 rq = intel_context_create_request(ce); 2764 if (IS_ERR(rq)) { 2765 err = PTR_ERR(rq); 2766 goto err_obj; 2767 } 2768 2769 rq->batch = i915_vma_get(vma); 2770 i915_request_get(rq); 2771 2772 err = igt_vma_move_to_active_unlocked(vma, rq, 0); 2773 if (!err) 2774 err = rq->engine->emit_bb_start(rq, 2775 i915_vma_offset(vma), 2776 PAGE_SIZE, 0); 2777 i915_request_add(rq); 2778 if (err) 2779 goto err_rq; 2780 2781 i915_gem_object_put(obj); 2782 intel_context_put(ce); 2783 2784 rq->mock.link.next = &(*prev)->mock.link; 2785 *prev = rq; 2786 return 0; 2787 2788 err_rq: 2789 i915_vma_put(rq->batch); 2790 i915_request_put(rq); 2791 err_obj: 2792 i915_gem_object_put(obj); 2793 err_ce: 2794 intel_context_put(ce); 2795 return err; 2796 } 2797 2798 static int __live_preempt_ring(struct intel_engine_cs *engine, 2799 struct igt_spinner *spin, 2800 int queue_sz, int ring_sz) 2801 { 2802 struct intel_context *ce[2] = {}; 2803 struct i915_request *rq; 2804 struct igt_live_test t; 2805 int err = 0; 2806 int n; 2807 2808 if (igt_live_test_begin(&t, engine->i915, __func__, engine->name)) 2809 return -EIO; 2810 2811 for (n = 0; n < ARRAY_SIZE(ce); n++) { 2812 struct intel_context *tmp; 2813 2814 tmp = intel_context_create(engine); 2815 if (IS_ERR(tmp)) { 2816 err = PTR_ERR(tmp); 2817 goto err_ce; 2818 } 2819 2820 tmp->ring_size = ring_sz; 2821 2822 err = intel_context_pin(tmp); 2823 if (err) { 2824 intel_context_put(tmp); 2825 goto err_ce; 2826 } 2827 2828 memset32(tmp->ring->vaddr, 2829 0xdeadbeef, /* trigger a hang if executed */ 2830 tmp->ring->vma->size / sizeof(u32)); 2831 2832 ce[n] = tmp; 2833 } 2834 2835 rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK); 2836 if (IS_ERR(rq)) { 2837 err = PTR_ERR(rq); 2838 goto err_ce; 2839 } 2840 2841 i915_request_get(rq); 2842 rq->sched.attr.priority = I915_PRIORITY_BARRIER; 2843 i915_request_add(rq); 2844 2845 if (!igt_wait_for_spinner(spin, rq)) { 2846 intel_gt_set_wedged(engine->gt); 2847 i915_request_put(rq); 2848 err = -ETIME; 2849 goto err_ce; 2850 } 2851 2852 /* Fill the ring, until we will cause a wrap */ 2853 n = 0; 2854 while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) { 2855 struct i915_request *tmp; 2856 2857 tmp = intel_context_create_request(ce[0]); 
2858 if (IS_ERR(tmp)) { 2859 err = PTR_ERR(tmp); 2860 i915_request_put(rq); 2861 goto err_ce; 2862 } 2863 2864 i915_request_add(tmp); 2865 intel_engine_flush_submission(engine); 2866 n++; 2867 } 2868 intel_engine_flush_submission(engine); 2869 pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n", 2870 engine->name, queue_sz, n, 2871 ce[0]->ring->size, 2872 ce[0]->ring->tail, 2873 ce[0]->ring->emit, 2874 rq->tail); 2875 i915_request_put(rq); 2876 2877 /* Create a second request to preempt the first ring */ 2878 rq = intel_context_create_request(ce[1]); 2879 if (IS_ERR(rq)) { 2880 err = PTR_ERR(rq); 2881 goto err_ce; 2882 } 2883 2884 rq->sched.attr.priority = I915_PRIORITY_BARRIER; 2885 i915_request_get(rq); 2886 i915_request_add(rq); 2887 2888 err = wait_for_submit(engine, rq, HZ / 2); 2889 i915_request_put(rq); 2890 if (err) { 2891 pr_err("%s: preemption request was not submitted\n", 2892 engine->name); 2893 err = -ETIME; 2894 } 2895 2896 pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n", 2897 engine->name, 2898 ce[0]->ring->tail, ce[0]->ring->emit, 2899 ce[1]->ring->tail, ce[1]->ring->emit); 2900 2901 err_ce: 2902 intel_engine_flush_submission(engine); 2903 igt_spinner_end(spin); 2904 for (n = 0; n < ARRAY_SIZE(ce); n++) { 2905 if (IS_ERR_OR_NULL(ce[n])) 2906 break; 2907 2908 intel_context_unpin(ce[n]); 2909 intel_context_put(ce[n]); 2910 } 2911 if (igt_live_test_end(&t)) 2912 err = -EIO; 2913 return err; 2914 } 2915 2916 static int live_preempt_ring(void *arg) 2917 { 2918 struct intel_gt *gt = arg; 2919 struct intel_engine_cs *engine; 2920 struct igt_spinner spin; 2921 enum intel_engine_id id; 2922 int err = 0; 2923 2924 /* 2925 * Check that we rollback large chunks of a ring in order to do a 2926 * preemption event. Similar to live_unlite_ring, but looking at 2927 * ring size rather than the impact of intel_ring_direction(). 2928 */ 2929 2930 if (igt_spinner_init(&spin, gt)) 2931 return -ENOMEM; 2932 2933 for_each_engine(engine, gt, id) { 2934 int n; 2935 2936 if (!intel_engine_has_preemption(engine)) 2937 continue; 2938 2939 if (!intel_engine_can_store_dword(engine)) 2940 continue; 2941 2942 st_engine_heartbeat_disable(engine); 2943 2944 for (n = 0; n <= 3; n++) { 2945 err = __live_preempt_ring(engine, &spin, 2946 n * SZ_4K / 4, SZ_4K); 2947 if (err) 2948 break; 2949 } 2950 2951 st_engine_heartbeat_enable(engine); 2952 if (err) 2953 break; 2954 } 2955 2956 igt_spinner_fini(&spin); 2957 return err; 2958 } 2959 2960 static int live_preempt_gang(void *arg) 2961 { 2962 struct intel_gt *gt = arg; 2963 struct intel_engine_cs *engine; 2964 enum intel_engine_id id; 2965 2966 /* 2967 * Build as long a chain of preempters as we can, with each 2968 * request higher priority than the last. Once we are ready, we release 2969 * the last batch which then precolates down the chain, each releasing 2970 * the next oldest in turn. The intent is to simply push as hard as we 2971 * can with the number of preemptions, trying to exceed narrow HW 2972 * limits. At a minimum, we insist that we can sort all the user 2973 * high priority levels into execution order. 
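 * Each batch from create_gang() spins on a semaphore in its own buffer;
 * terminating the last (highest priority) batch lets each one clear the
 * semaphore of the next lower priority batch in turn.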
2974 */ 2975 2976 for_each_engine(engine, gt, id) { 2977 struct i915_request *rq = NULL; 2978 struct igt_live_test t; 2979 IGT_TIMEOUT(end_time); 2980 int prio = 0; 2981 int err = 0; 2982 u32 *cs; 2983 2984 if (!intel_engine_has_preemption(engine)) 2985 continue; 2986 2987 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) 2988 return -EIO; 2989 2990 do { 2991 struct i915_sched_attr attr = { .priority = prio++ }; 2992 2993 err = create_gang(engine, &rq); 2994 if (err) 2995 break; 2996 2997 /* Submit each spinner at increasing priority */ 2998 engine->sched_engine->schedule(rq, &attr); 2999 } while (prio <= I915_PRIORITY_MAX && 3000 !__igt_timeout(end_time, NULL)); 3001 pr_debug("%s: Preempt chain of %d requests\n", 3002 engine->name, prio); 3003 3004 /* 3005 * Such that the last spinner is the highest priority and 3006 * should execute first. When that spinner completes, 3007 * it will terminate the next lowest spinner until there 3008 * are no more spinners and the gang is complete. 3009 */ 3010 cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC); 3011 if (!IS_ERR(cs)) { 3012 *cs = 0; 3013 i915_gem_object_unpin_map(rq->batch->obj); 3014 } else { 3015 err = PTR_ERR(cs); 3016 intel_gt_set_wedged(gt); 3017 } 3018 3019 while (rq) { /* wait for each rq from highest to lowest prio */ 3020 struct i915_request *n = list_next_entry(rq, mock.link); 3021 3022 if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) { 3023 struct drm_printer p = 3024 drm_info_printer(engine->i915->drm.dev); 3025 3026 pr_err("Failed to flush chain of %d requests, at %d\n", 3027 prio, rq_prio(rq)); 3028 intel_engine_dump(engine, &p, 3029 "%s\n", engine->name); 3030 3031 err = -ETIME; 3032 } 3033 3034 i915_vma_put(rq->batch); 3035 i915_request_put(rq); 3036 rq = n; 3037 } 3038 3039 if (igt_live_test_end(&t)) 3040 err = -EIO; 3041 if (err) 3042 return err; 3043 } 3044 3045 return 0; 3046 } 3047 3048 static struct i915_vma * 3049 create_gpr_user(struct intel_engine_cs *engine, 3050 struct i915_vma *result, 3051 unsigned int offset) 3052 { 3053 struct drm_i915_gem_object *obj; 3054 struct i915_vma *vma; 3055 u32 *cs; 3056 int err; 3057 int i; 3058 3059 obj = i915_gem_object_create_internal(engine->i915, 4096); 3060 if (IS_ERR(obj)) 3061 return ERR_CAST(obj); 3062 3063 vma = i915_vma_instance(obj, result->vm, NULL); 3064 if (IS_ERR(vma)) { 3065 i915_gem_object_put(obj); 3066 return vma; 3067 } 3068 3069 err = i915_vma_pin(vma, 0, 0, PIN_USER); 3070 if (err) { 3071 i915_vma_put(vma); 3072 return ERR_PTR(err); 3073 } 3074 3075 cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); 3076 if (IS_ERR(cs)) { 3077 i915_vma_put(vma); 3078 return ERR_CAST(cs); 3079 } 3080 3081 /* All GPR are clear for new contexts. We use GPR(0) as a constant */ 3082 *cs++ = MI_LOAD_REGISTER_IMM(1); 3083 *cs++ = CS_GPR(engine, 0); 3084 *cs++ = 1; 3085 3086 for (i = 1; i < NUM_GPR; i++) { 3087 u64 addr; 3088 3089 /* 3090 * Perform: GPR[i]++ 3091 * 3092 * As we read and write into the context saved GPR[i], if 3093 * we restart this batch buffer from an earlier point, we 3094 * will repeat the increment and store a value > 1. 
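 * live_preempt_user() later verifies the values written back by the
 * MI_STORE_REGISTER_MEM below; anything other than 1 means an
 * instruction was replayed.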
3095 */ 3096 *cs++ = MI_MATH(4); 3097 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i)); 3098 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0)); 3099 *cs++ = MI_MATH_ADD; 3100 *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU); 3101 3102 addr = i915_vma_offset(result) + offset + i * sizeof(*cs); 3103 *cs++ = MI_STORE_REGISTER_MEM_GEN8; 3104 *cs++ = CS_GPR(engine, 2 * i); 3105 *cs++ = lower_32_bits(addr); 3106 *cs++ = upper_32_bits(addr); 3107 3108 *cs++ = MI_SEMAPHORE_WAIT | 3109 MI_SEMAPHORE_POLL | 3110 MI_SEMAPHORE_SAD_GTE_SDD; 3111 *cs++ = i; 3112 *cs++ = lower_32_bits(i915_vma_offset(result)); 3113 *cs++ = upper_32_bits(i915_vma_offset(result)); 3114 } 3115 3116 *cs++ = MI_BATCH_BUFFER_END; 3117 i915_gem_object_flush_map(obj); 3118 i915_gem_object_unpin_map(obj); 3119 3120 return vma; 3121 } 3122 3123 static struct i915_vma *create_global(struct intel_gt *gt, size_t sz) 3124 { 3125 struct drm_i915_gem_object *obj; 3126 struct i915_vma *vma; 3127 int err; 3128 3129 obj = i915_gem_object_create_internal(gt->i915, sz); 3130 if (IS_ERR(obj)) 3131 return ERR_CAST(obj); 3132 3133 vma = i915_vma_instance(obj, >->ggtt->vm, NULL); 3134 if (IS_ERR(vma)) { 3135 i915_gem_object_put(obj); 3136 return vma; 3137 } 3138 3139 err = i915_ggtt_pin(vma, NULL, 0, 0); 3140 if (err) { 3141 i915_vma_put(vma); 3142 return ERR_PTR(err); 3143 } 3144 3145 return vma; 3146 } 3147 3148 static struct i915_request * 3149 create_gpr_client(struct intel_engine_cs *engine, 3150 struct i915_vma *global, 3151 unsigned int offset) 3152 { 3153 struct i915_vma *batch, *vma; 3154 struct intel_context *ce; 3155 struct i915_request *rq; 3156 int err; 3157 3158 ce = intel_context_create(engine); 3159 if (IS_ERR(ce)) 3160 return ERR_CAST(ce); 3161 3162 vma = i915_vma_instance(global->obj, ce->vm, NULL); 3163 if (IS_ERR(vma)) { 3164 err = PTR_ERR(vma); 3165 goto out_ce; 3166 } 3167 3168 err = i915_vma_pin(vma, 0, 0, PIN_USER); 3169 if (err) 3170 goto out_ce; 3171 3172 batch = create_gpr_user(engine, vma, offset); 3173 if (IS_ERR(batch)) { 3174 err = PTR_ERR(batch); 3175 goto out_vma; 3176 } 3177 3178 rq = intel_context_create_request(ce); 3179 if (IS_ERR(rq)) { 3180 err = PTR_ERR(rq); 3181 goto out_batch; 3182 } 3183 3184 err = igt_vma_move_to_active_unlocked(vma, rq, 0); 3185 3186 i915_vma_lock(batch); 3187 if (!err) 3188 err = i915_vma_move_to_active(batch, rq, 0); 3189 if (!err) 3190 err = rq->engine->emit_bb_start(rq, 3191 i915_vma_offset(batch), 3192 PAGE_SIZE, 0); 3193 i915_vma_unlock(batch); 3194 i915_vma_unpin(batch); 3195 3196 if (!err) 3197 i915_request_get(rq); 3198 i915_request_add(rq); 3199 3200 out_batch: 3201 i915_vma_put(batch); 3202 out_vma: 3203 i915_vma_unpin(vma); 3204 out_ce: 3205 intel_context_put(ce); 3206 return err ? 
ERR_PTR(err) : rq; 3207 } 3208 3209 static int preempt_user(struct intel_engine_cs *engine, 3210 struct i915_vma *global, 3211 int id) 3212 { 3213 struct i915_sched_attr attr = { 3214 .priority = I915_PRIORITY_MAX 3215 }; 3216 struct i915_request *rq; 3217 int err = 0; 3218 u32 *cs; 3219 3220 rq = intel_engine_create_kernel_request(engine); 3221 if (IS_ERR(rq)) 3222 return PTR_ERR(rq); 3223 3224 cs = intel_ring_begin(rq, 4); 3225 if (IS_ERR(cs)) { 3226 i915_request_add(rq); 3227 return PTR_ERR(cs); 3228 } 3229 3230 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 3231 *cs++ = i915_ggtt_offset(global); 3232 *cs++ = 0; 3233 *cs++ = id; 3234 3235 intel_ring_advance(rq, cs); 3236 3237 i915_request_get(rq); 3238 i915_request_add(rq); 3239 3240 engine->sched_engine->schedule(rq, &attr); 3241 3242 if (i915_request_wait(rq, 0, HZ / 2) < 0) 3243 err = -ETIME; 3244 i915_request_put(rq); 3245 3246 return err; 3247 } 3248 3249 static int live_preempt_user(void *arg) 3250 { 3251 struct intel_gt *gt = arg; 3252 struct intel_engine_cs *engine; 3253 struct i915_vma *global; 3254 enum intel_engine_id id; 3255 u32 *result; 3256 int err = 0; 3257 3258 /* 3259 * In our other tests, we look at preemption in carefully 3260 * controlled conditions in the ringbuffer. Since most of the 3261 * time is spent in user batches, most of our preemptions naturally 3262 * occur there. We want to verify that when we preempt inside a batch 3263 * we continue on from the current instruction and do not roll back 3264 * to the start, or another earlier arbitration point. 3265 * 3266 * To verify this, we create a batch which is a mixture of 3267 * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with 3268 * a few preempting contexts thrown into the mix, we look for any 3269 * repeated instructions (which show up as incorrect values). 
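 * The shared result page doubles as the semaphore: each call to
 * preempt_user() bumps the value at offset 0, releasing the GPR
 * batches one step at a time.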
3270 */ 3271 3272 global = create_global(gt, 4096); 3273 if (IS_ERR(global)) 3274 return PTR_ERR(global); 3275 3276 result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC); 3277 if (IS_ERR(result)) { 3278 i915_vma_unpin_and_release(&global, 0); 3279 return PTR_ERR(result); 3280 } 3281 3282 for_each_engine(engine, gt, id) { 3283 struct i915_request *client[3] = {}; 3284 struct igt_live_test t; 3285 int i; 3286 3287 if (!intel_engine_has_preemption(engine)) 3288 continue; 3289 3290 if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS) 3291 continue; /* we need per-context GPR */ 3292 3293 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { 3294 err = -EIO; 3295 break; 3296 } 3297 3298 memset(result, 0, 4096); 3299 3300 for (i = 0; i < ARRAY_SIZE(client); i++) { 3301 struct i915_request *rq; 3302 3303 rq = create_gpr_client(engine, global, 3304 NUM_GPR * i * sizeof(u32)); 3305 if (IS_ERR(rq)) { 3306 err = PTR_ERR(rq); 3307 goto end_test; 3308 } 3309 3310 client[i] = rq; 3311 } 3312 3313 /* Continuously preempt the set of 3 running contexts */ 3314 for (i = 1; i <= NUM_GPR; i++) { 3315 err = preempt_user(engine, global, i); 3316 if (err) 3317 goto end_test; 3318 } 3319 3320 if (READ_ONCE(result[0]) != NUM_GPR) { 3321 pr_err("%s: Failed to release semaphore\n", 3322 engine->name); 3323 err = -EIO; 3324 goto end_test; 3325 } 3326 3327 for (i = 0; i < ARRAY_SIZE(client); i++) { 3328 int gpr; 3329 3330 if (i915_request_wait(client[i], 0, HZ / 2) < 0) { 3331 err = -ETIME; 3332 goto end_test; 3333 } 3334 3335 for (gpr = 1; gpr < NUM_GPR; gpr++) { 3336 if (result[NUM_GPR * i + gpr] != 1) { 3337 pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n", 3338 engine->name, 3339 i, gpr, result[NUM_GPR * i + gpr]); 3340 err = -EINVAL; 3341 goto end_test; 3342 } 3343 } 3344 } 3345 3346 end_test: 3347 for (i = 0; i < ARRAY_SIZE(client); i++) { 3348 if (!client[i]) 3349 break; 3350 3351 i915_request_put(client[i]); 3352 } 3353 3354 /* Flush the semaphores on error */ 3355 smp_store_mb(result[0], -1); 3356 if (igt_live_test_end(&t)) 3357 err = -EIO; 3358 if (err) 3359 break; 3360 } 3361 3362 i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP); 3363 return err; 3364 } 3365 3366 static int live_preempt_timeout(void *arg) 3367 { 3368 struct intel_gt *gt = arg; 3369 struct i915_gem_context *ctx_hi, *ctx_lo; 3370 struct igt_spinner spin_lo; 3371 struct intel_engine_cs *engine; 3372 enum intel_engine_id id; 3373 int err = -ENOMEM; 3374 3375 /* 3376 * Check that we force preemption to occur by cancelling the previous 3377 * context if it refuses to yield the GPU. 
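 * The preempt timeout is temporarily dropped to a single jiffy so that
 * the forced reset fires promptly once the high priority request is
 * submitted behind the non-preemptible spinner.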
3378 */ 3379 if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) 3380 return 0; 3381 3382 if (!intel_has_reset_engine(gt)) 3383 return 0; 3384 3385 ctx_hi = kernel_context(gt->i915, NULL); 3386 if (!ctx_hi) 3387 return -ENOMEM; 3388 ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; 3389 3390 ctx_lo = kernel_context(gt->i915, NULL); 3391 if (!ctx_lo) 3392 goto err_ctx_hi; 3393 ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; 3394 3395 if (igt_spinner_init(&spin_lo, gt)) 3396 goto err_ctx_lo; 3397 3398 for_each_engine(engine, gt, id) { 3399 unsigned long saved_timeout; 3400 struct i915_request *rq; 3401 3402 if (!intel_engine_has_preemption(engine)) 3403 continue; 3404 3405 rq = spinner_create_request(&spin_lo, ctx_lo, engine, 3406 MI_NOOP); /* preemption disabled */ 3407 if (IS_ERR(rq)) { 3408 err = PTR_ERR(rq); 3409 goto err_spin_lo; 3410 } 3411 3412 i915_request_add(rq); 3413 if (!igt_wait_for_spinner(&spin_lo, rq)) { 3414 intel_gt_set_wedged(gt); 3415 err = -EIO; 3416 goto err_spin_lo; 3417 } 3418 3419 rq = igt_request_alloc(ctx_hi, engine); 3420 if (IS_ERR(rq)) { 3421 igt_spinner_end(&spin_lo); 3422 err = PTR_ERR(rq); 3423 goto err_spin_lo; 3424 } 3425 3426 /* Flush the previous CS ack before changing timeouts */ 3427 while (READ_ONCE(engine->execlists.pending[0])) 3428 cpu_relax(); 3429 3430 saved_timeout = engine->props.preempt_timeout_ms; 3431 engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */ 3432 3433 i915_request_get(rq); 3434 i915_request_add(rq); 3435 3436 intel_engine_flush_submission(engine); 3437 engine->props.preempt_timeout_ms = saved_timeout; 3438 3439 if (i915_request_wait(rq, 0, HZ / 10) < 0) { 3440 intel_gt_set_wedged(gt); 3441 i915_request_put(rq); 3442 err = -ETIME; 3443 goto err_spin_lo; 3444 } 3445 3446 igt_spinner_end(&spin_lo); 3447 i915_request_put(rq); 3448 } 3449 3450 err = 0; 3451 err_spin_lo: 3452 igt_spinner_fini(&spin_lo); 3453 err_ctx_lo: 3454 kernel_context_close(ctx_lo); 3455 err_ctx_hi: 3456 kernel_context_close(ctx_hi); 3457 return err; 3458 } 3459 3460 static int random_range(struct rnd_state *rnd, int min, int max) 3461 { 3462 return i915_prandom_u32_max_state(max - min, rnd) + min; 3463 } 3464 3465 static int random_priority(struct rnd_state *rnd) 3466 { 3467 return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX); 3468 } 3469 3470 struct preempt_smoke { 3471 struct intel_gt *gt; 3472 struct kthread_work work; 3473 struct i915_gem_context **contexts; 3474 struct intel_engine_cs *engine; 3475 struct drm_i915_gem_object *batch; 3476 unsigned int ncontext; 3477 struct rnd_state prng; 3478 unsigned long count; 3479 int result; 3480 }; 3481 3482 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke) 3483 { 3484 return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext, 3485 &smoke->prng)]; 3486 } 3487 3488 static int smoke_submit(struct preempt_smoke *smoke, 3489 struct i915_gem_context *ctx, int prio, 3490 struct drm_i915_gem_object *batch) 3491 { 3492 struct i915_request *rq; 3493 struct i915_vma *vma = NULL; 3494 int err = 0; 3495 3496 if (batch) { 3497 struct i915_address_space *vm; 3498 3499 vm = i915_gem_context_get_eb_vm(ctx); 3500 vma = i915_vma_instance(batch, vm, NULL); 3501 i915_vm_put(vm); 3502 if (IS_ERR(vma)) 3503 return PTR_ERR(vma); 3504 3505 err = i915_vma_pin(vma, 0, 0, PIN_USER); 3506 if (err) 3507 return err; 3508 } 3509 3510 ctx->sched.priority = prio; 3511 3512 rq = igt_request_alloc(ctx, smoke->engine); 3513 if (IS_ERR(rq)) { 3514 err = PTR_ERR(rq); 3515 goto unpin; 3516 } 3517 3518 if 
(vma) { 3519 err = igt_vma_move_to_active_unlocked(vma, rq, 0); 3520 if (!err) 3521 err = rq->engine->emit_bb_start(rq, 3522 i915_vma_offset(vma), 3523 PAGE_SIZE, 0); 3524 } 3525 3526 i915_request_add(rq); 3527 3528 unpin: 3529 if (vma) 3530 i915_vma_unpin(vma); 3531 3532 return err; 3533 } 3534 3535 static void smoke_crescendo_work(struct kthread_work *work) 3536 { 3537 struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work); 3538 IGT_TIMEOUT(end_time); 3539 unsigned long count; 3540 3541 count = 0; 3542 do { 3543 struct i915_gem_context *ctx = smoke_context(smoke); 3544 3545 smoke->result = smoke_submit(smoke, ctx, 3546 count % I915_PRIORITY_MAX, 3547 smoke->batch); 3548 3549 count++; 3550 } while (!smoke->result && count < smoke->ncontext && 3551 !__igt_timeout(end_time, NULL)); 3552 3553 smoke->count = count; 3554 } 3555 3556 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) 3557 #define BATCH BIT(0) 3558 { 3559 struct kthread_worker *worker[I915_NUM_ENGINES] = {}; 3560 struct preempt_smoke *arg; 3561 struct intel_engine_cs *engine; 3562 enum intel_engine_id id; 3563 unsigned long count; 3564 int err = 0; 3565 3566 arg = kmalloc_array(I915_NUM_ENGINES, sizeof(*arg), GFP_KERNEL); 3567 if (!arg) 3568 return -ENOMEM; 3569 3570 memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg)); 3571 3572 for_each_engine(engine, smoke->gt, id) { 3573 arg[id] = *smoke; 3574 arg[id].engine = engine; 3575 if (!(flags & BATCH)) 3576 arg[id].batch = NULL; 3577 arg[id].count = 0; 3578 3579 worker[id] = kthread_run_worker(0, "igt/smoke:%d", id); 3580 if (IS_ERR(worker[id])) { 3581 err = PTR_ERR(worker[id]); 3582 break; 3583 } 3584 3585 kthread_init_work(&arg[id].work, smoke_crescendo_work); 3586 kthread_queue_work(worker[id], &arg[id].work); 3587 } 3588 3589 count = 0; 3590 for_each_engine(engine, smoke->gt, id) { 3591 if (IS_ERR_OR_NULL(worker[id])) 3592 continue; 3593 3594 kthread_flush_work(&arg[id].work); 3595 if (arg[id].result && !err) 3596 err = arg[id].result; 3597 3598 count += arg[id].count; 3599 3600 kthread_destroy_worker(worker[id]); 3601 } 3602 3603 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", 3604 count, flags, smoke->gt->info.num_engines, smoke->ncontext); 3605 3606 kfree(arg); 3607 return 0; 3608 } 3609 3610 static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) 3611 { 3612 enum intel_engine_id id; 3613 IGT_TIMEOUT(end_time); 3614 unsigned long count; 3615 3616 count = 0; 3617 do { 3618 for_each_engine(smoke->engine, smoke->gt, id) { 3619 struct i915_gem_context *ctx = smoke_context(smoke); 3620 int err; 3621 3622 err = smoke_submit(smoke, 3623 ctx, random_priority(&smoke->prng), 3624 flags & BATCH ? 
smoke->batch : NULL); 3625 if (err) 3626 return err; 3627 3628 count++; 3629 } 3630 } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL)); 3631 3632 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", 3633 count, flags, smoke->gt->info.num_engines, smoke->ncontext); 3634 return 0; 3635 } 3636 3637 static int live_preempt_smoke(void *arg) 3638 { 3639 struct preempt_smoke smoke = { 3640 .gt = arg, 3641 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), 3642 .ncontext = 256, 3643 }; 3644 const unsigned int phase[] = { 0, BATCH }; 3645 struct igt_live_test t; 3646 int err = -ENOMEM; 3647 u32 *cs; 3648 int n; 3649 3650 smoke.contexts = kmalloc_array(smoke.ncontext, 3651 sizeof(*smoke.contexts), 3652 GFP_KERNEL); 3653 if (!smoke.contexts) 3654 return -ENOMEM; 3655 3656 smoke.batch = 3657 i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE); 3658 if (IS_ERR(smoke.batch)) { 3659 err = PTR_ERR(smoke.batch); 3660 goto err_free; 3661 } 3662 3663 cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB); 3664 if (IS_ERR(cs)) { 3665 err = PTR_ERR(cs); 3666 goto err_batch; 3667 } 3668 for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) 3669 cs[n] = MI_ARB_CHECK; 3670 cs[n] = MI_BATCH_BUFFER_END; 3671 i915_gem_object_flush_map(smoke.batch); 3672 i915_gem_object_unpin_map(smoke.batch); 3673 3674 if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) { 3675 err = -EIO; 3676 goto err_batch; 3677 } 3678 3679 for (n = 0; n < smoke.ncontext; n++) { 3680 smoke.contexts[n] = kernel_context(smoke.gt->i915, NULL); 3681 if (!smoke.contexts[n]) 3682 goto err_ctx; 3683 } 3684 3685 for (n = 0; n < ARRAY_SIZE(phase); n++) { 3686 err = smoke_crescendo(&smoke, phase[n]); 3687 if (err) 3688 goto err_ctx; 3689 3690 err = smoke_random(&smoke, phase[n]); 3691 if (err) 3692 goto err_ctx; 3693 } 3694 3695 err_ctx: 3696 if (igt_live_test_end(&t)) 3697 err = -EIO; 3698 3699 for (n = 0; n < smoke.ncontext; n++) { 3700 if (!smoke.contexts[n]) 3701 break; 3702 kernel_context_close(smoke.contexts[n]); 3703 } 3704 3705 err_batch: 3706 i915_gem_object_put(smoke.batch); 3707 err_free: 3708 kfree(smoke.contexts); 3709 3710 return err; 3711 } 3712 3713 static int nop_virtual_engine(struct intel_gt *gt, 3714 struct intel_engine_cs **siblings, 3715 unsigned int nsibling, 3716 unsigned int nctx, 3717 unsigned int flags) 3718 #define CHAIN BIT(0) 3719 { 3720 IGT_TIMEOUT(end_time); 3721 struct i915_request *request[16] = {}; 3722 struct intel_context *ve[16]; 3723 unsigned long n, prime, nc; 3724 struct igt_live_test t; 3725 ktime_t times[2] = {}; 3726 int err; 3727 3728 GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); 3729 3730 for (n = 0; n < nctx; n++) { 3731 ve[n] = intel_engine_create_virtual(siblings, nsibling, 0); 3732 if (IS_ERR(ve[n])) { 3733 err = PTR_ERR(ve[n]); 3734 nctx = n; 3735 goto out; 3736 } 3737 3738 err = intel_context_pin(ve[n]); 3739 if (err) { 3740 intel_context_put(ve[n]); 3741 nctx = n; 3742 goto out; 3743 } 3744 } 3745 3746 err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name); 3747 if (err) 3748 goto out; 3749 3750 for_each_prime_number_from(prime, 1, 8192) { 3751 times[1] = ktime_get_raw(); 3752 3753 if (flags & CHAIN) { 3754 for (nc = 0; nc < nctx; nc++) { 3755 for (n = 0; n < prime; n++) { 3756 struct i915_request *rq; 3757 3758 rq = i915_request_create(ve[nc]); 3759 if (IS_ERR(rq)) { 3760 err = PTR_ERR(rq); 3761 goto out; 3762 } 3763 3764 if (request[nc]) 3765 i915_request_put(request[nc]); 3766 request[nc] = i915_request_get(rq); 
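/* keep a reference to the most recent request per context for the wait below */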
3767 i915_request_add(rq); 3768 } 3769 } 3770 } else { 3771 for (n = 0; n < prime; n++) { 3772 for (nc = 0; nc < nctx; nc++) { 3773 struct i915_request *rq; 3774 3775 rq = i915_request_create(ve[nc]); 3776 if (IS_ERR(rq)) { 3777 err = PTR_ERR(rq); 3778 goto out; 3779 } 3780 3781 if (request[nc]) 3782 i915_request_put(request[nc]); 3783 request[nc] = i915_request_get(rq); 3784 i915_request_add(rq); 3785 } 3786 } 3787 } 3788 3789 for (nc = 0; nc < nctx; nc++) { 3790 if (i915_request_wait(request[nc], 0, HZ / 10) < 0) { 3791 pr_err("%s(%s): wait for %llx:%lld timed out\n", 3792 __func__, ve[0]->engine->name, 3793 request[nc]->fence.context, 3794 request[nc]->fence.seqno); 3795 3796 GEM_TRACE("%s(%s) failed at request %llx:%lld\n", 3797 __func__, ve[0]->engine->name, 3798 request[nc]->fence.context, 3799 request[nc]->fence.seqno); 3800 GEM_TRACE_DUMP(); 3801 intel_gt_set_wedged(gt); 3802 break; 3803 } 3804 } 3805 3806 times[1] = ktime_sub(ktime_get_raw(), times[1]); 3807 if (prime == 1) 3808 times[0] = times[1]; 3809 3810 for (nc = 0; nc < nctx; nc++) { 3811 i915_request_put(request[nc]); 3812 request[nc] = NULL; 3813 } 3814 3815 if (__igt_timeout(end_time, NULL)) 3816 break; 3817 } 3818 3819 err = igt_live_test_end(&t); 3820 if (err) 3821 goto out; 3822 3823 pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n", 3824 nctx, ve[0]->engine->name, ktime_to_ns(times[0]), 3825 prime, div64_u64(ktime_to_ns(times[1]), prime)); 3826 3827 out: 3828 if (igt_flush_test(gt->i915)) 3829 err = -EIO; 3830 3831 for (nc = 0; nc < nctx; nc++) { 3832 i915_request_put(request[nc]); 3833 intel_context_unpin(ve[nc]); 3834 intel_context_put(ve[nc]); 3835 } 3836 return err; 3837 } 3838 3839 static unsigned int 3840 __select_siblings(struct intel_gt *gt, 3841 unsigned int class, 3842 struct intel_engine_cs **siblings, 3843 bool (*filter)(const struct intel_engine_cs *)) 3844 { 3845 unsigned int n = 0; 3846 unsigned int inst; 3847 3848 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { 3849 if (!gt->engine_class[class][inst]) 3850 continue; 3851 3852 if (filter && !filter(gt->engine_class[class][inst])) 3853 continue; 3854 3855 siblings[n++] = gt->engine_class[class][inst]; 3856 } 3857 3858 return n; 3859 } 3860 3861 static unsigned int 3862 select_siblings(struct intel_gt *gt, 3863 unsigned int class, 3864 struct intel_engine_cs **siblings) 3865 { 3866 return __select_siblings(gt, class, siblings, NULL); 3867 } 3868 3869 static int live_virtual_engine(void *arg) 3870 { 3871 struct intel_gt *gt = arg; 3872 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; 3873 struct intel_engine_cs *engine; 3874 enum intel_engine_id id; 3875 unsigned int class; 3876 int err; 3877 3878 if (intel_uc_uses_guc_submission(>->uc)) 3879 return 0; 3880 3881 for_each_engine(engine, gt, id) { 3882 err = nop_virtual_engine(gt, &engine, 1, 1, 0); 3883 if (err) { 3884 pr_err("Failed to wrap engine %s: err=%d\n", 3885 engine->name, err); 3886 return err; 3887 } 3888 } 3889 3890 for (class = 0; class <= MAX_ENGINE_CLASS; class++) { 3891 int nsibling, n; 3892 3893 nsibling = select_siblings(gt, class, siblings); 3894 if (nsibling < 2) 3895 continue; 3896 3897 for (n = 1; n <= nsibling + 1; n++) { 3898 err = nop_virtual_engine(gt, siblings, nsibling, 3899 n, 0); 3900 if (err) 3901 return err; 3902 } 3903 3904 err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN); 3905 if (err) 3906 return err; 3907 } 3908 3909 return 0; 3910 } 3911 3912 static int mask_virtual_engine(struct intel_gt *gt, 3913 struct intel_engine_cs 
**siblings, 3914 unsigned int nsibling) 3915 { 3916 struct i915_request *request[MAX_ENGINE_INSTANCE + 1]; 3917 struct intel_context *ve; 3918 struct igt_live_test t; 3919 unsigned int n; 3920 int err; 3921 3922 /* 3923 * Check that by setting the execution mask on a request, we can 3924 * restrict it to our desired engine within the virtual engine. 3925 */ 3926 3927 ve = intel_engine_create_virtual(siblings, nsibling, 0); 3928 if (IS_ERR(ve)) { 3929 err = PTR_ERR(ve); 3930 goto out_close; 3931 } 3932 3933 err = intel_context_pin(ve); 3934 if (err) 3935 goto out_put; 3936 3937 err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); 3938 if (err) 3939 goto out_unpin; 3940 3941 for (n = 0; n < nsibling; n++) { 3942 request[n] = i915_request_create(ve); 3943 if (IS_ERR(request[n])) { 3944 err = PTR_ERR(request[n]); 3945 nsibling = n; 3946 goto out; 3947 } 3948 3949 /* Reverse order as it's more likely to be unnatural */ 3950 request[n]->execution_mask = siblings[nsibling - n - 1]->mask; 3951 3952 i915_request_get(request[n]); 3953 i915_request_add(request[n]); 3954 } 3955 3956 for (n = 0; n < nsibling; n++) { 3957 if (i915_request_wait(request[n], 0, HZ / 10) < 0) { 3958 pr_err("%s(%s): wait for %llx:%lld timed out\n", 3959 __func__, ve->engine->name, 3960 request[n]->fence.context, 3961 request[n]->fence.seqno); 3962 3963 GEM_TRACE("%s(%s) failed at request %llx:%lld\n", 3964 __func__, ve->engine->name, 3965 request[n]->fence.context, 3966 request[n]->fence.seqno); 3967 GEM_TRACE_DUMP(); 3968 intel_gt_set_wedged(gt); 3969 err = -EIO; 3970 goto out; 3971 } 3972 3973 if (request[n]->engine != siblings[nsibling - n - 1]) { 3974 pr_err("Executed on wrong sibling '%s', expected '%s'\n", 3975 request[n]->engine->name, 3976 siblings[nsibling - n - 1]->name); 3977 err = -EINVAL; 3978 goto out; 3979 } 3980 } 3981 3982 err = igt_live_test_end(&t); 3983 out: 3984 if (igt_flush_test(gt->i915)) 3985 err = -EIO; 3986 3987 for (n = 0; n < nsibling; n++) 3988 i915_request_put(request[n]); 3989 3990 out_unpin: 3991 intel_context_unpin(ve); 3992 out_put: 3993 intel_context_put(ve); 3994 out_close: 3995 return err; 3996 } 3997 3998 static int live_virtual_mask(void *arg) 3999 { 4000 struct intel_gt *gt = arg; 4001 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; 4002 unsigned int class; 4003 int err; 4004 4005 if (intel_uc_uses_guc_submission(>->uc)) 4006 return 0; 4007 4008 for (class = 0; class <= MAX_ENGINE_CLASS; class++) { 4009 unsigned int nsibling; 4010 4011 nsibling = select_siblings(gt, class, siblings); 4012 if (nsibling < 2) 4013 continue; 4014 4015 err = mask_virtual_engine(gt, siblings, nsibling); 4016 if (err) 4017 return err; 4018 } 4019 4020 return 0; 4021 } 4022 4023 static int slicein_virtual_engine(struct intel_gt *gt, 4024 struct intel_engine_cs **siblings, 4025 unsigned int nsibling) 4026 { 4027 const long timeout = slice_timeout(siblings[0]); 4028 struct intel_context *ce; 4029 struct i915_request *rq; 4030 struct igt_spinner spin; 4031 unsigned int n; 4032 int err = 0; 4033 4034 /* 4035 * Virtual requests must take part in timeslicing on the target engines. 
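 * Each sibling is first blocked by a spinner, so the subsequent virtual
 * request can only complete within the timeout if it is granted a
 * timeslice on one of them.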
4036 */ 4037 4038 if (igt_spinner_init(&spin, gt)) 4039 return -ENOMEM; 4040 4041 for (n = 0; n < nsibling; n++) { 4042 ce = intel_context_create(siblings[n]); 4043 if (IS_ERR(ce)) { 4044 err = PTR_ERR(ce); 4045 goto out; 4046 } 4047 4048 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); 4049 intel_context_put(ce); 4050 if (IS_ERR(rq)) { 4051 err = PTR_ERR(rq); 4052 goto out; 4053 } 4054 4055 i915_request_add(rq); 4056 } 4057 4058 ce = intel_engine_create_virtual(siblings, nsibling, 0); 4059 if (IS_ERR(ce)) { 4060 err = PTR_ERR(ce); 4061 goto out; 4062 } 4063 4064 rq = intel_context_create_request(ce); 4065 intel_context_put(ce); 4066 if (IS_ERR(rq)) { 4067 err = PTR_ERR(rq); 4068 goto out; 4069 } 4070 4071 i915_request_get(rq); 4072 i915_request_add(rq); 4073 if (i915_request_wait(rq, 0, timeout) < 0) { 4074 GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n", 4075 __func__, rq->engine->name); 4076 GEM_TRACE_DUMP(); 4077 intel_gt_set_wedged(gt); 4078 err = -EIO; 4079 } 4080 i915_request_put(rq); 4081 4082 out: 4083 igt_spinner_end(&spin); 4084 if (igt_flush_test(gt->i915)) 4085 err = -EIO; 4086 igt_spinner_fini(&spin); 4087 return err; 4088 } 4089 4090 static int sliceout_virtual_engine(struct intel_gt *gt, 4091 struct intel_engine_cs **siblings, 4092 unsigned int nsibling) 4093 { 4094 const long timeout = slice_timeout(siblings[0]); 4095 struct intel_context *ce; 4096 struct i915_request *rq; 4097 struct igt_spinner spin; 4098 unsigned int n; 4099 int err = 0; 4100 4101 /* 4102 * Virtual requests must allow others a fair timeslice. 4103 */ 4104 4105 if (igt_spinner_init(&spin, gt)) 4106 return -ENOMEM; 4107 4108 /* XXX We do not handle oversubscription and fairness with normal rq */ 4109 for (n = 0; n < nsibling; n++) { 4110 ce = intel_engine_create_virtual(siblings, nsibling, 0); 4111 if (IS_ERR(ce)) { 4112 err = PTR_ERR(ce); 4113 goto out; 4114 } 4115 4116 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); 4117 intel_context_put(ce); 4118 if (IS_ERR(rq)) { 4119 err = PTR_ERR(rq); 4120 goto out; 4121 } 4122 4123 i915_request_add(rq); 4124 } 4125 4126 for (n = 0; !err && n < nsibling; n++) { 4127 ce = intel_context_create(siblings[n]); 4128 if (IS_ERR(ce)) { 4129 err = PTR_ERR(ce); 4130 goto out; 4131 } 4132 4133 rq = intel_context_create_request(ce); 4134 intel_context_put(ce); 4135 if (IS_ERR(rq)) { 4136 err = PTR_ERR(rq); 4137 goto out; 4138 } 4139 4140 i915_request_get(rq); 4141 i915_request_add(rq); 4142 if (i915_request_wait(rq, 0, timeout) < 0) { 4143 GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n", 4144 __func__, siblings[n]->name); 4145 GEM_TRACE_DUMP(); 4146 intel_gt_set_wedged(gt); 4147 err = -EIO; 4148 } 4149 i915_request_put(rq); 4150 } 4151 4152 out: 4153 igt_spinner_end(&spin); 4154 if (igt_flush_test(gt->i915)) 4155 err = -EIO; 4156 igt_spinner_fini(&spin); 4157 return err; 4158 } 4159 4160 static int live_virtual_slice(void *arg) 4161 { 4162 struct intel_gt *gt = arg; 4163 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; 4164 unsigned int class; 4165 int err; 4166 4167 if (intel_uc_uses_guc_submission(>->uc)) 4168 return 0; 4169 4170 for (class = 0; class <= MAX_ENGINE_CLASS; class++) { 4171 unsigned int nsibling; 4172 4173 nsibling = __select_siblings(gt, class, siblings, 4174 intel_engine_has_timeslices); 4175 if (nsibling < 2) 4176 continue; 4177 4178 err = slicein_virtual_engine(gt, siblings, nsibling); 4179 if (err) 4180 return err; 4181 4182 err = sliceout_virtual_engine(gt, siblings, nsibling); 4183 if (err) 4184 
return err; 4185 } 4186 4187 return 0; 4188 } 4189 4190 static int preserved_virtual_engine(struct intel_gt *gt, 4191 struct intel_engine_cs **siblings, 4192 unsigned int nsibling) 4193 { 4194 struct i915_request *last = NULL; 4195 struct intel_context *ve; 4196 struct i915_vma *scratch; 4197 struct igt_live_test t; 4198 unsigned int n; 4199 int err = 0; 4200 u32 *cs; 4201 4202 scratch = 4203 __vm_create_scratch_for_read_pinned(&siblings[0]->gt->ggtt->vm, 4204 PAGE_SIZE); 4205 if (IS_ERR(scratch)) 4206 return PTR_ERR(scratch); 4207 4208 err = i915_vma_sync(scratch); 4209 if (err) 4210 goto out_scratch; 4211 4212 ve = intel_engine_create_virtual(siblings, nsibling, 0); 4213 if (IS_ERR(ve)) { 4214 err = PTR_ERR(ve); 4215 goto out_scratch; 4216 } 4217 4218 err = intel_context_pin(ve); 4219 if (err) 4220 goto out_put; 4221 4222 err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); 4223 if (err) 4224 goto out_unpin; 4225 4226 for (n = 0; n < NUM_GPR_DW; n++) { 4227 struct intel_engine_cs *engine = siblings[n % nsibling]; 4228 struct i915_request *rq; 4229 4230 rq = i915_request_create(ve); 4231 if (IS_ERR(rq)) { 4232 err = PTR_ERR(rq); 4233 goto out_end; 4234 } 4235 4236 i915_request_put(last); 4237 last = i915_request_get(rq); 4238 4239 cs = intel_ring_begin(rq, 8); 4240 if (IS_ERR(cs)) { 4241 i915_request_add(rq); 4242 err = PTR_ERR(cs); 4243 goto out_end; 4244 } 4245 4246 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; 4247 *cs++ = CS_GPR(engine, n); 4248 *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); 4249 *cs++ = 0; 4250 4251 *cs++ = MI_LOAD_REGISTER_IMM(1); 4252 *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW); 4253 *cs++ = n + 1; 4254 4255 *cs++ = MI_NOOP; 4256 intel_ring_advance(rq, cs); 4257 4258 /* Restrict this request to run on a particular engine */ 4259 rq->execution_mask = engine->mask; 4260 i915_request_add(rq); 4261 } 4262 4263 if (i915_request_wait(last, 0, HZ / 5) < 0) { 4264 err = -ETIME; 4265 goto out_end; 4266 } 4267 4268 cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB); 4269 if (IS_ERR(cs)) { 4270 err = PTR_ERR(cs); 4271 goto out_end; 4272 } 4273 4274 for (n = 0; n < NUM_GPR_DW; n++) { 4275 if (cs[n] != n) { 4276 pr_err("Incorrect value[%d] found for GPR[%d]\n", 4277 cs[n], n); 4278 err = -EINVAL; 4279 break; 4280 } 4281 } 4282 4283 i915_gem_object_unpin_map(scratch->obj); 4284 4285 out_end: 4286 if (igt_live_test_end(&t)) 4287 err = -EIO; 4288 i915_request_put(last); 4289 out_unpin: 4290 intel_context_unpin(ve); 4291 out_put: 4292 intel_context_put(ve); 4293 out_scratch: 4294 i915_vma_unpin_and_release(&scratch, 0); 4295 return err; 4296 } 4297 4298 static int live_virtual_preserved(void *arg) 4299 { 4300 struct intel_gt *gt = arg; 4301 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; 4302 unsigned int class; 4303 4304 /* 4305 * Check that the context image retains non-privileged (user) registers 4306 * from one engine to the next. For this we check that the CS_GPR 4307 * are preserved. 4308 */ 4309 4310 if (intel_uc_uses_guc_submission(>->uc)) 4311 return 0; 4312 4313 /* As we use CS_GPR we cannot run before they existed on all engines. 
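 * Hence the GRAPHICS_VER() < 9 check below.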
*/ 4314 if (GRAPHICS_VER(gt->i915) < 9) 4315 return 0; 4316 4317 for (class = 0; class <= MAX_ENGINE_CLASS; class++) { 4318 int nsibling, err; 4319 4320 nsibling = select_siblings(gt, class, siblings); 4321 if (nsibling < 2) 4322 continue; 4323 4324 err = preserved_virtual_engine(gt, siblings, nsibling); 4325 if (err) 4326 return err; 4327 } 4328 4329 return 0; 4330 } 4331 4332 static int reset_virtual_engine(struct intel_gt *gt, 4333 struct intel_engine_cs **siblings, 4334 unsigned int nsibling) 4335 { 4336 struct intel_engine_cs *engine; 4337 struct intel_context *ve; 4338 struct igt_spinner spin; 4339 struct i915_request *rq; 4340 unsigned int n; 4341 int err = 0; 4342 4343 /* 4344 * In order to support offline error capture for fast preempt reset, 4345 * we need to decouple the guilty request and ensure that it and its 4346 * descendents are not executed while the capture is in progress. 4347 */ 4348 4349 if (igt_spinner_init(&spin, gt)) 4350 return -ENOMEM; 4351 4352 ve = intel_engine_create_virtual(siblings, nsibling, 0); 4353 if (IS_ERR(ve)) { 4354 err = PTR_ERR(ve); 4355 goto out_spin; 4356 } 4357 4358 for (n = 0; n < nsibling; n++) 4359 st_engine_heartbeat_disable(siblings[n]); 4360 4361 rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK); 4362 if (IS_ERR(rq)) { 4363 err = PTR_ERR(rq); 4364 goto out_heartbeat; 4365 } 4366 i915_request_add(rq); 4367 4368 if (!igt_wait_for_spinner(&spin, rq)) { 4369 intel_gt_set_wedged(gt); 4370 err = -ETIME; 4371 goto out_heartbeat; 4372 } 4373 4374 engine = rq->engine; 4375 GEM_BUG_ON(engine == ve->engine); 4376 4377 /* Take ownership of the reset and tasklet */ 4378 err = engine_lock_reset_tasklet(engine); 4379 if (err) 4380 goto out_heartbeat; 4381 4382 engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet); 4383 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); 4384 4385 /* Fake a preemption event; failed of course */ 4386 spin_lock_irq(&engine->sched_engine->lock); 4387 __unwind_incomplete_requests(engine); 4388 spin_unlock_irq(&engine->sched_engine->lock); 4389 GEM_BUG_ON(rq->engine != engine); 4390 4391 /* Reset the engine while keeping our active request on hold */ 4392 execlists_hold(engine, rq); 4393 GEM_BUG_ON(!i915_request_on_hold(rq)); 4394 4395 __intel_engine_reset_bh(engine, NULL); 4396 GEM_BUG_ON(rq->fence.error != -EIO); 4397 4398 /* Release our grasp on the engine, letting CS flow again */ 4399 engine_unlock_reset_tasklet(engine); 4400 4401 /* Check that we do not resubmit the held request */ 4402 i915_request_get(rq); 4403 if (!i915_request_wait(rq, 0, HZ / 5)) { 4404 pr_err("%s: on hold request completed!\n", 4405 engine->name); 4406 intel_gt_set_wedged(gt); 4407 err = -EIO; 4408 goto out_rq; 4409 } 4410 GEM_BUG_ON(!i915_request_on_hold(rq)); 4411 4412 /* But is resubmitted on release */ 4413 execlists_unhold(engine, rq); 4414 if (i915_request_wait(rq, 0, HZ / 5) < 0) { 4415 pr_err("%s: held request did not complete!\n", 4416 engine->name); 4417 intel_gt_set_wedged(gt); 4418 err = -ETIME; 4419 } 4420 4421 out_rq: 4422 i915_request_put(rq); 4423 out_heartbeat: 4424 for (n = 0; n < nsibling; n++) 4425 st_engine_heartbeat_enable(siblings[n]); 4426 4427 intel_context_put(ve); 4428 out_spin: 4429 igt_spinner_fini(&spin); 4430 return err; 4431 } 4432 4433 static int live_virtual_reset(void *arg) 4434 { 4435 struct intel_gt *gt = arg; 4436 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; 4437 unsigned int class; 4438 4439 /* 4440 * Check that we handle a reset event within a virtual engine. 
4441 * Only the physical engine is reset, but we have to check the flow 4442 * of the virtual requests around the reset, and make sure it is not 4443 * forgotten. 4444 */ 4445 4446 if (intel_uc_uses_guc_submission(&gt->uc)) 4447 return 0; 4448 4449 if (!intel_has_reset_engine(gt)) 4450 return 0; 4451 4452 for (class = 0; class <= MAX_ENGINE_CLASS; class++) { 4453 int nsibling, err; 4454 4455 nsibling = select_siblings(gt, class, siblings); 4456 if (nsibling < 2) 4457 continue; 4458 4459 err = reset_virtual_engine(gt, siblings, nsibling); 4460 if (err) 4461 return err; 4462 } 4463 4464 return 0; 4465 } 4466 4467 int intel_execlists_live_selftests(struct drm_i915_private *i915) 4468 { 4469 static const struct i915_subtest tests[] = { 4470 SUBTEST(live_sanitycheck), 4471 SUBTEST(live_unlite_switch), 4472 SUBTEST(live_unlite_preempt), 4473 SUBTEST(live_unlite_ring), 4474 SUBTEST(live_pin_rewind), 4475 SUBTEST(live_hold_reset), 4476 SUBTEST(live_error_interrupt), 4477 SUBTEST(live_timeslice_preempt), 4478 SUBTEST(live_timeslice_rewind), 4479 SUBTEST(live_timeslice_queue), 4480 SUBTEST(live_timeslice_nopreempt), 4481 SUBTEST(live_busywait_preempt), 4482 SUBTEST(live_preempt), 4483 SUBTEST(live_late_preempt), 4484 SUBTEST(live_nopreempt), 4485 SUBTEST(live_preempt_cancel), 4486 SUBTEST(live_suppress_self_preempt), 4487 SUBTEST(live_chain_preempt), 4488 SUBTEST(live_preempt_ring), 4489 SUBTEST(live_preempt_gang), 4490 SUBTEST(live_preempt_timeout), 4491 SUBTEST(live_preempt_user), 4492 SUBTEST(live_preempt_smoke), 4493 SUBTEST(live_virtual_engine), 4494 SUBTEST(live_virtual_mask), 4495 SUBTEST(live_virtual_preserved), 4496 SUBTEST(live_virtual_slice), 4497 SUBTEST(live_virtual_reset), 4498 }; 4499 4500 if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP) 4501 return 0; 4502 4503 if (intel_gt_is_wedged(to_gt(i915))) 4504 return 0; 4505 4506 return intel_gt_live_subtests(tests, to_gt(i915)); 4507 } 4508