// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"

#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_ring.h"
#include "shmem_utils.h"

/*
 * The per-platform tables are u8-encoded in @data. Decode @data and set the
 * addresses' offset and commands in @regs. The following encoding is used
 * for each byte. There are 2 steps: decoding commands and decoding addresses.
 *
 * Commands:
 * [7]: create NOPs - the number of NOPs is set in the lower bits
 * [6]: When creating an MI_LOAD_REGISTER_IMM command, allow setting
 *      MI_LRI_FORCE_POSTED
 * [5:0]: Number of NOPs, or number of registers to set values for in case of
 *        MI_LOAD_REGISTER_IMM
 *
 * Addresses: these are decoded after an MI_LOAD_REGISTER_IMM command by "count"
 * number of registers. They are set by using the REG/REG16 macros: the former
 * is used for offsets smaller than 0x200 while the latter is for values bigger
 * than that. Those macros already set all the bits documented below correctly:
 *
 * [7]: When a register offset needs more than 6 bits, additional bytes follow
 *      for the lower bits
 * [6:0]: Register offset, without considering the engine base.
 *
 * This function only tweaks the commands and register offsets. Values are not
 * filled out. (A worked decode example follows the gen8_xcs_offsets table
 * below.)
 */
static void set_offsets(u32 *regs,
			const u8 *data,
			const struct intel_engine_cs *engine,
			bool close)
#define NOP(x) (BIT(7) | (x))
#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
#define REG16(x) \
	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
	(((x) >> 2) & 0x7f)
#define END 0
{
	const u32 base = engine->mmio_base;

	while (*data) {
		u8 count, flags;

		if (*data & BIT(7)) { /* skip */
			count = *data++ & ~BIT(7);
			regs += count;
			continue;
		}

		count = *data & 0x3f;
		flags = *data >> 6;
		data++;

		*regs = MI_LOAD_REGISTER_IMM(count);
		if (flags & POSTED)
			*regs |= MI_LRI_FORCE_POSTED;
		if (GRAPHICS_VER(engine->i915) >= 11)
			*regs |= MI_LRI_LRM_CS_MMIO;
		regs++;

		GEM_BUG_ON(!count);
		do {
			u32 offset = 0;
			u8 v;

			do {
				v = *data++;
				offset <<= 7;
				offset |= v & ~BIT(7);
			} while (v & BIT(7));

			regs[0] = base + (offset << 2);
			regs += 2;
		} while (--count);
	}

	if (close) {
		/* Close the batch; used mainly by live_lrc_layout() */
		*regs = MI_BATCH_BUFFER_END;
		if (GRAPHICS_VER(engine->i915) >= 11)
			*regs |= BIT(0);
	}
}

static const u8 gen8_xcs_offsets[] = {
	NOP(1),
	LRI(11, 0),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),

	NOP(9),
	LRI(9, 0),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	NOP(13),
	LRI(2, 0),
	REG16(0x200),
	REG(0x028),

	END
};
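
/*
 * For illustration, decoding the start of gen8_xcs_offsets above:
 * NOP(1) is 0x81, so set_offsets() skips one dword; LRI(11, 0) is 0x0b,
 * emitting MI_LOAD_REGISTER_IMM(11) without MI_LRI_FORCE_POSTED;
 * REG16(0x244) expands to the bytes 0x81, 0x11, which decode back to
 * (0x91 << 2) == 0x244 added to the engine's mmio base; REG(0x034) is the
 * single byte 0x0d, decoding to offset 0x34. Each decoded register then
 * occupies two dwords in @regs (offset + value), with only the offset
 * written by set_offsets().
 */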

static const u8 gen9_xcs_offsets[] = {
	NOP(1),
	LRI(14, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),

	NOP(3),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	NOP(13),
	LRI(1, POSTED),
	REG16(0x200),

	NOP(13),
	LRI(44, POSTED),
	REG(0x028),
	REG(0x09c),
	REG(0x0c0),
	REG(0x178),
	REG(0x17c),
	REG16(0x358),
	REG(0x170),
	REG(0x150),
	REG(0x154),
	REG(0x158),
	REG16(0x41c),
	REG16(0x600),
	REG16(0x604),
	REG16(0x608),
	REG16(0x60c),
	REG16(0x610),
	REG16(0x614),
	REG16(0x618),
	REG16(0x61c),
	REG16(0x620),
	REG16(0x624),
	REG16(0x628),
	REG16(0x62c),
	REG16(0x630),
	REG16(0x634),
	REG16(0x638),
	REG16(0x63c),
	REG16(0x640),
	REG16(0x644),
	REG16(0x648),
	REG16(0x64c),
	REG16(0x650),
	REG16(0x654),
	REG16(0x658),
	REG16(0x65c),
	REG16(0x660),
	REG16(0x664),
	REG16(0x668),
	REG16(0x66c),
	REG16(0x670),
	REG16(0x674),
	REG16(0x678),
	REG16(0x67c),
	REG(0x068),

	END
};

static const u8 gen12_xcs_offsets[] = {
	NOP(1),
	LRI(13, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),
	REG(0x180),
	REG16(0x2b4),

	NOP(5),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	END
};

static const u8 dg2_xcs_offsets[] = {
	NOP(1),
	LRI(15, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),
	REG(0x180),
	REG16(0x2b4),
	REG(0x120),
	REG(0x124),

	NOP(1),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	END
};

static const u8 gen8_rcs_offsets[] = {
	NOP(1),
	LRI(14, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),

	NOP(3),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	NOP(13),
	LRI(1, 0),
	REG(0x0c8),

	END
};

static const u8 gen9_rcs_offsets[] = {
	NOP(1),
	LRI(14, POSTED),
	REG16(0x244),
	REG(0x34),
	REG(0x30),
	REG(0x38),
	REG(0x3c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),

	NOP(3),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	NOP(13),
	LRI(1, 0),
	REG(0xc8),

	NOP(13),
	LRI(44, POSTED),
	REG(0x28),
	REG(0x9c),
	REG(0xc0),
	REG(0x178),
	REG(0x17c),
	REG16(0x358),
	REG(0x170),
	REG(0x150),
	REG(0x154),
	REG(0x158),
	REG16(0x41c),
	REG16(0x600),
	REG16(0x604),
	REG16(0x608),
	REG16(0x60c),
	REG16(0x610),
	REG16(0x614),
	REG16(0x618),
	REG16(0x61c),
	REG16(0x620),
	REG16(0x624),
	REG16(0x628),
	REG16(0x62c),
	REG16(0x630),
	REG16(0x634),
	REG16(0x638),
	REG16(0x63c),
	REG16(0x640),
	REG16(0x644),
	REG16(0x648),
	REG16(0x64c),
	REG16(0x650),
	REG16(0x654),
	REG16(0x658),
	REG16(0x65c),
	REG16(0x660),
	REG16(0x664),
	REG16(0x668),
	REG16(0x66c),
	REG16(0x670),
	REG16(0x674),
	REG16(0x678),
	REG16(0x67c),
	REG(0x68),

	END
};

static const u8 gen11_rcs_offsets[] = {
	NOP(1),
	LRI(15, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),
	REG(0x180),

	NOP(1),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	LRI(1, POSTED),
	REG(0x1b0),

	NOP(10),
	LRI(1, 0),
	REG(0x0c8),

	END
};

static const u8 gen12_rcs_offsets[] = {
	NOP(1),
	LRI(13, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),
	REG(0x180),
	REG16(0x2b4),

	NOP(5),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	LRI(3, POSTED),
	REG(0x1b0),
	REG16(0x5a8),
	REG16(0x5ac),

	NOP(6),
	LRI(1, 0),
	REG(0x0c8),
	NOP(3 + 9 + 1),

	LRI(51, POSTED),
	REG16(0x588),
	REG16(0x588),
	REG16(0x588),
	REG16(0x588),
	REG16(0x588),
	REG16(0x588),
	REG(0x028),
	REG(0x09c),
	REG(0x0c0),
	REG(0x178),
	REG(0x17c),
	REG16(0x358),
	REG(0x170),
	REG(0x150),
	REG(0x154),
	REG(0x158),
	REG16(0x41c),
	REG16(0x600),
	REG16(0x604),
	REG16(0x608),
	REG16(0x60c),
	REG16(0x610),
	REG16(0x614),
	REG16(0x618),
	REG16(0x61c),
	REG16(0x620),
	REG16(0x624),
	REG16(0x628),
	REG16(0x62c),
	REG16(0x630),
	REG16(0x634),
	REG16(0x638),
	REG16(0x63c),
	REG16(0x640),
	REG16(0x644),
	REG16(0x648),
	REG16(0x64c),
	REG16(0x650),
	REG16(0x654),
	REG16(0x658),
	REG16(0x65c),
	REG16(0x660),
	REG16(0x664),
	REG16(0x668),
	REG16(0x66c),
	REG16(0x670),
	REG16(0x674),
	REG16(0x678),
	REG16(0x67c),
	REG(0x068),
	REG(0x084),
	NOP(1),

	END
};

static const u8 dg2_rcs_offsets[] = {
	NOP(1),
	LRI(15, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),
	REG(0x180),
	REG16(0x2b4),
	REG(0x120),
	REG(0x124),

	NOP(1),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	LRI(3, POSTED),
	REG(0x1b0),
	REG16(0x5a8),
	REG16(0x5ac),

	NOP(6),
	LRI(1, 0),
	REG(0x0c8),

	END
};

static const u8 mtl_rcs_offsets[] = {
	NOP(1),
	LRI(15, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),
	REG(0x180),
	REG16(0x2b4),
	REG(0x120),
	REG(0x124),

	NOP(1),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	NOP(2),
	LRI(2, POSTED),
	REG16(0x5a8),
	REG16(0x5ac),

	NOP(6),
	LRI(1, 0),
	REG(0x0c8),

	END
};

#undef END
#undef REG16
#undef REG
#undef LRI
#undef NOP

static const u8 *reg_offsets(const struct intel_engine_cs *engine)
{
	/*
	 * The gen12+ lists only have the registers we program in the basic
	 * default state. We rely on the context image using relative
	 * addressing to automatically fix up the register state between the
	 * physical engines for the virtual engine.
	 */
	GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&
		   !intel_engine_has_relative_mmio(engine));

	if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE) {
		if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70))
			return mtl_rcs_offsets;
		else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
			return dg2_rcs_offsets;
		else if (GRAPHICS_VER(engine->i915) >= 12)
			return gen12_rcs_offsets;
		else if (GRAPHICS_VER(engine->i915) >= 11)
			return gen11_rcs_offsets;
		else if (GRAPHICS_VER(engine->i915) >= 9)
			return gen9_rcs_offsets;
		else
			return gen8_rcs_offsets;
	} else {
		if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
			return dg2_xcs_offsets;
		else if (GRAPHICS_VER(engine->i915) >= 12)
			return gen12_xcs_offsets;
		else if (GRAPHICS_VER(engine->i915) >= 9)
			return gen9_xcs_offsets;
		else
			return gen8_xcs_offsets;
	}
}

static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		return 0x70;
	else if (GRAPHICS_VER(engine->i915) >= 12)
		return 0x60;
	else if (GRAPHICS_VER(engine->i915) >= 9)
		return 0x54;
	else if (engine->class == RENDER_CLASS)
		return 0x58;
	else
		return -1;
}

static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		return 0x80;
	else if (GRAPHICS_VER(engine->i915) >= 12)
		return 0x70;
	else if (GRAPHICS_VER(engine->i915) >= 9)
		return 0x64;
	else if (GRAPHICS_VER(engine->i915) >= 8 &&
		 engine->class == RENDER_CLASS)
		return 0xc4;
	else
		return -1;
}

static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		return 0x84;
	else if (GRAPHICS_VER(engine->i915) >= 12)
		return 0x74;
	else if (GRAPHICS_VER(engine->i915) >= 9)
		return 0x68;
	else if (engine->class == RENDER_CLASS)
		return 0xd8;
	else
		return -1;
}

static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
{
	if (GRAPHICS_VER(engine->i915) >= 12)
		return 0x12;
	else if (GRAPHICS_VER(engine->i915) >= 9 || engine->class == RENDER_CLASS)
		return 0x18;
	else
		return -1;
}

static int
lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
{
	int x;

	x = lrc_ring_wa_bb_per_ctx(engine);
	if (x < 0)
		return x;

	return x + 2;
}

static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
{
	int x;

	x = lrc_ring_indirect_ptr(engine);
	if (x < 0)
		return x;

	return x + 2;
}

static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		/*
		 * Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
		 * simply to match the RCS context image layout.
		 */
		return 0xc6;
	else if (engine->class != RENDER_CLASS)
		return -1;
	else if (GRAPHICS_VER(engine->i915) >= 12)
		return 0xb6;
	else if (GRAPHICS_VER(engine->i915) >= 11)
		return 0xaa;
	else
		return -1;
}

static u32
lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
{
	if (GRAPHICS_VER(engine->i915) >= 12)
		return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
	else if (GRAPHICS_VER(engine->i915) >= 11)
		return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
	else if (GRAPHICS_VER(engine->i915) >= 9)
		return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
	else if (GRAPHICS_VER(engine->i915) >= 8)
		return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;

	GEM_BUG_ON(GRAPHICS_VER(engine->i915) < 8);

	return 0;
}

static void
lrc_setup_bb_per_ctx(u32 *regs,
		     const struct intel_engine_cs *engine,
		     u32 ctx_bb_ggtt_addr)
{
	GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
	regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
		ctx_bb_ggtt_addr |
		PER_CTX_BB_FORCE |
		PER_CTX_BB_VALID;
}

static void
lrc_setup_indirect_ctx(u32 *regs,
		       const struct intel_engine_cs *engine,
		       u32 ctx_bb_ggtt_addr,
		       u32 size)
{
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
	GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
	regs[lrc_ring_indirect_ptr(engine) + 1] =
		ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);

	GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
	regs[lrc_ring_indirect_offset(engine) + 1] =
		lrc_ring_indirect_offset_default(engine) << 6;
}

static bool ctx_needs_runalone(const struct intel_context *ce)
{
	struct i915_gem_context *gem_ctx;
	bool ctx_is_protected = false;

	/*
	 * On MTL and newer platforms, protected contexts require setting
	 * the LRC run-alone bit or else the encryption will not happen.
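	 * The bit itself is applied in init_common_regs() below, which sets
	 * GEN12_CTX_CTRL_RUNALONE_MODE in CTX_CONTEXT_CONTROL when this
	 * function returns true.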
	 */
	if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 70) &&
	    (ce->engine->class == COMPUTE_CLASS || ce->engine->class == RENDER_CLASS)) {
		rcu_read_lock();
		gem_ctx = rcu_dereference(ce->gem_context);
		if (gem_ctx)
			ctx_is_protected = gem_ctx->uses_protected_content;
		rcu_read_unlock();
	}

	return ctx_is_protected;
}

static void init_common_regs(u32 * const regs,
			     const struct intel_context *ce,
			     const struct intel_engine_cs *engine,
			     bool inhibit)
{
	u32 ctl;
	int loc;

	ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
	ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
	if (inhibit)
		ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
	if (GRAPHICS_VER(engine->i915) < 11)
		ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
					   CTX_CTRL_RS_CTX_ENABLE);
	if (ctx_needs_runalone(ce))
		ctl |= _MASKED_BIT_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE);
	regs[CTX_CONTEXT_CONTROL] = ctl;

	regs[CTX_TIMESTAMP] = ce->stats.runtime.last;

	loc = lrc_ring_bb_offset(engine);
	if (loc != -1)
		regs[loc + 1] = 0;
}

static void init_wa_bb_regs(u32 * const regs,
			    const struct intel_engine_cs *engine)
{
	const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;

	if (wa_ctx->per_ctx.size) {
		const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);

		GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
		regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
			(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
	}

	if (wa_ctx->indirect_ctx.size) {
		lrc_setup_indirect_ctx(regs, engine,
				       i915_ggtt_offset(wa_ctx->vma) +
				       wa_ctx->indirect_ctx.offset,
				       wa_ctx->indirect_ctx.size);
	}
}

static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt)
{
	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address to PML4 and
		 * other PDP Descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, regs);
	} else {
		ASSIGN_CTX_PDP(ppgtt, regs, 3);
		ASSIGN_CTX_PDP(ppgtt, regs, 2);
		ASSIGN_CTX_PDP(ppgtt, regs, 1);
		ASSIGN_CTX_PDP(ppgtt, regs, 0);
	}
}

static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		return i915_vm_to_ggtt(vm)->alias;
	else
		return i915_vm_to_ppgtt(vm);
}

static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
{
	int x;

	x = lrc_ring_mi_mode(engine);
	if (x != -1) {
		regs[x + 1] &= ~STOP_RING;
		regs[x + 1] |= STOP_RING << 16;
	}
}

static void __lrc_init_regs(u32 *regs,
			    const struct intel_context *ce,
			    const struct intel_engine_cs *engine,
			    bool inhibit)
{
	/*
	 * A context is actually a big batch buffer with several
	 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
	 * values we are setting here are only for the first context restore:
	 * on a subsequent save, the GPU will recreate this batchbuffer with new
	 * values (including all the missing MI_LOAD_REGISTER_IMM commands that
	 * we are not initializing here).
	 *
	 * Must keep consistent with virtual_update_register_offsets().
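	 *
	 * For illustration, the state written by set_offsets() below looks
	 * roughly like:
	 *
	 *   MI_LOAD_REGISTER_IMM(N) [| MI_LRI_FORCE_POSTED]
	 *   <engine register offset>, <value>
	 *   <engine register offset>, <value>
	 *   ...
	 *
	 * with only the command dwords and register offsets filled in here.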
	 */

	if (inhibit)
		memset(regs, 0, PAGE_SIZE);

	set_offsets(regs, reg_offsets(engine), engine, inhibit);

	init_common_regs(regs, ce, engine, inhibit);
	init_ppgtt_regs(regs, vm_alias(ce->vm));

	init_wa_bb_regs(regs, engine);

	__reset_stop_ring(regs, engine);
}

void lrc_init_regs(const struct intel_context *ce,
		   const struct intel_engine_cs *engine,
		   bool inhibit)
{
	__lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
}

void lrc_reset_regs(const struct intel_context *ce,
		    const struct intel_engine_cs *engine)
{
	__reset_stop_ring(ce->lrc_reg_state, engine);
}

static void
set_redzone(void *vaddr, const struct intel_engine_cs *engine)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	vaddr += engine->context_size;

	memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
}

static void
check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	vaddr += engine->context_size;

	if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
		drm_err_once(&engine->i915->drm,
			     "%s context redzone overwritten!\n",
			     engine->name);
}

static u32 context_wa_bb_offset(const struct intel_context *ce)
{
	return PAGE_SIZE * ce->wa_bb_page;
}

/*
 * per_ctx below determines which WABB section is used.
 * When true, the function returns the location of the
 * PER_CTX_BB. When false, the function returns the
 * location of the INDIRECT_CTX.
 */
static u32 *context_wabb(const struct intel_context *ce, bool per_ctx)
{
	void *ptr;

	GEM_BUG_ON(!ce->wa_bb_page);

	ptr = ce->lrc_reg_state;
	ptr -= LRC_STATE_OFFSET; /* back to start of context image */
	ptr += context_wa_bb_offset(ce);
	ptr += per_ctx ? PAGE_SIZE : 0;

	return ptr;
}

void lrc_init_state(struct intel_context *ce,
		    struct intel_engine_cs *engine,
		    void *state)
{
	bool inhibit = true;

	set_redzone(state, engine);

	if (ce->default_state) {
		shmem_read(ce->default_state, 0, state, engine->context_size);
		__set_bit(CONTEXT_VALID_BIT, &ce->flags);
		inhibit = false;
	}

	/* Clear the ppHWSP (inc. per-context counters) */
	memset(state, 0, PAGE_SIZE);

	/* Clear the indirect wa and storage */
	if (ce->wa_bb_page)
		memset(state + context_wa_bb_offset(ce), 0, PAGE_SIZE);

	/*
	 * The second page of the context object contains some registers which
	 * must be set up prior to the first execution.
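	 * That register state lives at state + LRC_STATE_OFFSET, which is
	 * where __lrc_init_regs() is pointed just below.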
	 */
	__lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
}

u32 lrc_indirect_bb(const struct intel_context *ce)
{
	return i915_ggtt_offset(ce->state) + context_wa_bb_offset(ce);
}

static u32 *setup_predicate_disable_wa(const struct intel_context *ce, u32 *cs)
{
	/* If predication is active, this will be noop'ed */
	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
	*cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
	*cs++ = 0;
	*cs++ = 0; /* No predication */

	/* predicated end, only terminates if SET_PREDICATE_RESULT:0 is clear */
	*cs++ = MI_BATCH_BUFFER_END | BIT(15);
	*cs++ = MI_SET_PREDICATE | MI_SET_PREDICATE_DISABLE;

	/* Instructions are no longer predicated (disabled), we can proceed */
	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
	*cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
	*cs++ = 0;
	*cs++ = 1; /* enable predication before the next BB */

	*cs++ = MI_BATCH_BUFFER_END;
	GEM_BUG_ON(offset_in_page(cs) > DG2_PREDICATE_RESULT_WA);

	return cs;
}

static struct i915_vma *
__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 context_size;

	context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		context_size += I915_GTT_PAGE_SIZE; /* for redzone */

	if (GRAPHICS_VER(engine->i915) >= 12) {
		ce->wa_bb_page = context_size / PAGE_SIZE;
		/* INDIRECT_CTX and PER_CTX_BB need separate pages. */
		context_size += PAGE_SIZE * 2;
	}

	if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
		ce->parallel.guc.parent_page = context_size / PAGE_SIZE;
		context_size += PARENT_SCRATCH_SIZE;
	}

	obj = i915_gem_object_create_lmem(engine->i915, context_size,
					  I915_BO_ALLOC_PM_VOLATILE);
	if (IS_ERR(obj)) {
		obj = i915_gem_object_create_shmem(engine->i915, context_size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);

		/*
		 * Wa_22016122933: For Media version 13.0, all Media GT shared
		 * memory needs to be mapped as WC on CPU side and UC (PAT
		 * index 2) on GPU side.
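		 * Hence the I915_CACHE_NONE coherency setting applied just
		 * below whenever the workaround is required for this GT.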
		 */
		if (intel_gt_needs_wa_22016122933(engine->gt))
			i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	}

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	return vma;
}

static struct intel_timeline *
pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
{
	struct intel_timeline *tl = fetch_and_zero(&ce->timeline);

	return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
}

int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(ce->state);

	if (!intel_context_has_own_state(ce))
		ce->default_state = engine->default_state;

	vma = __lrc_alloc_state(ce, engine);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ring = intel_engine_create_ring(engine, ce->ring_size);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_vma;
	}

	if (!page_mask_bits(ce->timeline)) {
		struct intel_timeline *tl;

		/*
		 * Use the static global HWSP for the kernel context, and
		 * a dynamically allocated cacheline for everyone else.
		 */
		if (unlikely(ce->timeline))
			tl = pinned_timeline(ce, engine);
		else
			tl = intel_timeline_create(engine->gt);
		if (IS_ERR(tl)) {
			err = PTR_ERR(tl);
			goto err_ring;
		}

		ce->timeline = tl;
	}

	ce->ring = ring;
	ce->state = vma;

	return 0;

err_ring:
	intel_ring_put(ring);
err_vma:
	i915_vma_put(vma);
	return err;
}

void lrc_reset(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));

	intel_ring_reset(ce->ring, ce->ring->emit);

	/* Scrub away the garbage */
	lrc_init_regs(ce, ce->engine, true);
	ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
}

int
lrc_pre_pin(struct intel_context *ce,
	    struct intel_engine_cs *engine,
	    struct i915_gem_ww_ctx *ww,
	    void **vaddr)
{
	GEM_BUG_ON(!ce->state);
	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));

	*vaddr = i915_gem_object_pin_map(ce->state->obj,
					 intel_gt_coherent_map_type(ce->engine->gt,
								    ce->state->obj,
								    false) |
					 I915_MAP_OVERRIDE);

	return PTR_ERR_OR_ZERO(*vaddr);
}

int
lrc_pin(struct intel_context *ce,
	struct intel_engine_cs *engine,
	void *vaddr)
{
	ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;

	if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
		lrc_init_state(ce, engine, vaddr);

	ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
	return 0;
}

void lrc_unpin(struct intel_context *ce)
{
	if (unlikely(ce->parallel.last_rq)) {
		i915_request_put(ce->parallel.last_rq);
		ce->parallel.last_rq = NULL;
	}
	check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
		      ce->engine);
}

void lrc_post_unpin(struct intel_context *ce)
{
	i915_gem_object_unpin_map(ce->state->obj);
}

void lrc_fini(struct intel_context *ce)
{
	if (!ce->state)
		return;

	intel_ring_put(fetch_and_zero(&ce->ring));
	i915_vma_put(fetch_and_zero(&ce->state));
}

void lrc_destroy(struct kref *kref)
{
	struct intel_context *ce =
		container_of(kref, typeof(*ce), ref);

	GEM_BUG_ON(!i915_active_is_idle(&ce->active));
	GEM_BUG_ON(intel_context_is_pinned(ce));

	lrc_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static u32 *
gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
{
	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
		CTX_TIMESTAMP * sizeof(u32);
	*cs++ = 0;

	*cs++ = MI_LOAD_REGISTER_REG |
		MI_LRR_SOURCE_CS_MMIO |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));

	*cs++ = MI_LOAD_REGISTER_REG |
		MI_LRR_SOURCE_CS_MMIO |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));

	return cs;
}

static u32 *
gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
{
	GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);

	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
		(lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
	*cs++ = 0;

	return cs;
}

static u32 *
gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
{
	GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);

	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
		(lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
	*cs++ = 0;

	*cs++ = MI_LOAD_REGISTER_REG |
		MI_LRR_SOURCE_CS_MMIO |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));

	return cs;
}

/*
 * The bspec's tuning guide asks us to program a vertical watermark value of
 * 0x3FF. However, this register is not saved/restored properly by the
 * hardware, so we're required to apply the desired value via INDIRECT_CTX
 * batch buffer to ensure the value takes effect properly. All other bits
 * in this register should remain at 0 (the hardware default).
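 * The value is applied from the RCS INDIRECT_CTX batch built in
 * gen12_emit_indirect_ctx_rcs() below.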
 */
static u32 *
dg2_emit_draw_watermark_setting(u32 *cs)
{
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(DRAW_WATERMARK);
	*cs++ = REG_FIELD_PREP(VERT_WM_VAL, 0x3FF);

	return cs;
}

static u32 *
gen12_invalidate_state_cache(u32 *cs)
{
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN12_CS_DEBUG_MODE2);
	*cs++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
	return cs;
}

static u32 *
gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
{
	cs = gen12_emit_timestamp_wa(ce, cs);
	cs = gen12_emit_cmd_buf_wa(ce, cs);
	cs = gen12_emit_restore_scratch(ce, cs);

	/* Wa_16013000631:dg2 */
	if (IS_DG2_G11(ce->engine->i915))
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);

	cs = gen12_emit_aux_table_inv(ce->engine, cs);

	/* Wa_18022495364 */
	if (IS_GFX_GT_IP_RANGE(ce->engine->gt, IP_VER(12, 0), IP_VER(12, 10)))
		cs = gen12_invalidate_state_cache(cs);

	/* Wa_16014892111 */
	if (IS_GFX_GT_IP_STEP(ce->engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(ce->engine->gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(ce->engine->i915))
		cs = dg2_emit_draw_watermark_setting(cs);

	return cs;
}

static u32 *
gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
{
	cs = gen12_emit_timestamp_wa(ce, cs);
	cs = gen12_emit_restore_scratch(ce, cs);

	/* Wa_16013000631:dg2 */
	if (IS_DG2_G11(ce->engine->i915))
		if (ce->engine->class == COMPUTE_CLASS)
			cs = gen8_emit_pipe_control(cs,
						    PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
						    0);

	return gen12_emit_aux_table_inv(ce->engine, cs);
}

static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs)
{
	struct intel_gt *gt = ce->engine->gt;
	int mocs = gt->mocs.uc_index << 1;

	/*
	 * Wa_16018031267 / Wa_16018063123 require that SW forces the
	 * main copy engine arbitration into round robin mode. We
	 * additionally need to submit the following WABB blt command
	 * to produce 4 subblits, with each subblit generating 0 byte
	 * write requests, as WABB:
	 *
	 * XY_FASTCOLOR_BLT
	 *  BG0    -> 5100000E
	 *  BG1    -> 0000003F (Dest pitch)
	 *  BG2    -> 00000000 (X1, Y1) = (0, 0)
	 *  BG3    -> 00040001 (X2, Y2) = (1, 4)
	 *  BG4    -> scratch
	 *  BG5    -> scratch
	 *  BG6-12 -> 00000000
	 *  BG13   -> 20004004 (Surf. Width = 2, Surf.
	 *             Height = 5)
	 *  BG14   -> 00000010 (Qpitch = 4)
	 *  BG15   -> 00000000
	 */
	*cs++ = XY_FAST_COLOR_BLT_CMD | (16 - 2);
	*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | 0x3f;
	*cs++ = 0;
	*cs++ = 4 << 16 | 1;
	*cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
	*cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0x20004004;
	*cs++ = 0x10;
	*cs++ = 0;

	return cs;
}

static u32 *
xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs)
{
	/* Wa_16018031267, Wa_16018063123 */
	if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
		cs = xehp_emit_fastcolor_blt_wabb(ce, cs);

	return cs;
}

static void
setup_per_ctx_bb(const struct intel_context *ce,
		 const struct intel_engine_cs *engine,
		 u32 *(*emit)(const struct intel_context *, u32 *))
{
	/* Place PER_CTX_BB on next page after INDIRECT_CTX */
	u32 * const start = context_wabb(ce, true);
	u32 *cs;

	cs = emit(ce, start);

	/* PER_CTX_BB must manually terminate */
	*cs++ = MI_BATCH_BUFFER_END;

	GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
	lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine,
			     lrc_indirect_bb(ce) + PAGE_SIZE);
}

static void
setup_indirect_ctx_bb(const struct intel_context *ce,
		      const struct intel_engine_cs *engine,
		      u32 *(*emit)(const struct intel_context *, u32 *))
{
	u32 * const start = context_wabb(ce, false);
	u32 *cs;

	cs = emit(ce, start);
	GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
	while ((unsigned long)cs % CACHELINE_BYTES)
		*cs++ = MI_NOOP;

	GEM_BUG_ON(cs - start > DG2_PREDICATE_RESULT_BB / sizeof(*start));
	setup_predicate_disable_wa(ce, start + DG2_PREDICATE_RESULT_BB / sizeof(*start));

	lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
			       lrc_indirect_bb(ce),
			       (cs - start) * sizeof(*cs));
}

/*
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
 *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
 *      bits 32-52:    ctx ID, a globally unique tag (highest bit used by GuC)
 *      bits 53-54:    mbz, reserved for use by hardware
 *      bits 55-63:    group ID, currently unused and set to 0
 *
 * Starting from Gen11, the upper dword of the descriptor has a new format:
 *
 *      bits 32-36:    reserved
 *      bits 37-47:    SW context ID
 *      bits 48-53:    engine instance
 *      bit  54:       mbz, reserved for use by hardware
 *      bits 55-60:    SW counter
 *      bits 61-63:    engine class
 *
 * On Xe_HP, the upper dword of the descriptor has a new format:
 *
 *      bits 32-37:    virtual function number
 *      bit  38:       mbz, reserved for use by hardware
 *      bits 39-54:    SW context ID
 *      bits 55-57:    reserved
 *      bits 58-63:    SW counter
 *
 * engine info, SW context ID and SW counter need to form a unique number
 * (Context ID) per lrc.
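 *
 * lrc_descriptor() below computes only the lower dword (the GGTT offset
 * of the context state plus the GEN8_CTX_* flags); the upper dword
 * described above is managed by the submission code rather than here.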
 */
static u32 lrc_descriptor(const struct intel_context *ce)
{
	u32 desc;

	desc = INTEL_LEGACY_32B_CONTEXT;
	if (i915_vm_is_4lvl(ce->vm))
		desc = INTEL_LEGACY_64B_CONTEXT;
	desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;

	desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
	if (GRAPHICS_VER(ce->vm->i915) == 8)
		desc |= GEN8_CTX_L3LLC_COHERENT;

	return i915_ggtt_offset(ce->state) | desc;
}

u32 lrc_update_regs(const struct intel_context *ce,
		    const struct intel_engine_cs *engine,
		    u32 head)
{
	struct intel_ring *ring = ce->ring;
	u32 *regs = ce->lrc_reg_state;

	GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));

	regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
	regs[CTX_RING_HEAD] = head;
	regs[CTX_RING_TAIL] = ring->tail;
	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;

	/* RPCS */
	if (engine->class == RENDER_CLASS) {
		regs[CTX_R_PWR_CLK_STATE] =
			intel_sseu_make_rpcs(engine->gt, &ce->sseu);

		i915_oa_init_reg_state(ce, engine);
	}

	if (ce->wa_bb_page) {
		u32 *(*fn)(const struct intel_context *ce, u32 *cs);

		fn = gen12_emit_indirect_ctx_xcs;
		if (ce->engine->class == RENDER_CLASS)
			fn = gen12_emit_indirect_ctx_rcs;

		/* Mutually exclusive wrt the global indirect bb */
		GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
		setup_indirect_ctx_bb(ce, engine, fn);
		setup_per_ctx_bb(ce, engine, xehp_emit_per_ctx_bb);
	}

	return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
}

void lrc_update_offsets(struct intel_context *ce,
			struct intel_engine_cs *engine)
{
	set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
}

void lrc_check_regs(const struct intel_context *ce,
		    const struct intel_engine_cs *engine,
		    const char *when)
{
	const struct intel_ring *ring = ce->ring;
	u32 *regs = ce->lrc_reg_state;
	bool valid = true;
	int x;

	if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
		pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
		       engine->name,
		       regs[CTX_RING_START],
		       i915_ggtt_offset(ring->vma));
		regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
		valid = false;
	}

	if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
	    (RING_CTL_SIZE(ring->size) | RING_VALID)) {
		pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
		       engine->name,
		       regs[CTX_RING_CTL],
		       (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
		regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
		valid = false;
	}

	x = lrc_ring_mi_mode(engine);
	if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
		pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
		       engine->name, regs[x + 1]);
		regs[x + 1] &= ~STOP_RING;
		regs[x + 1] |= STOP_RING << 16;
		valid = false;
	}

	WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
}

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
 * PIPE_CONTROL instruction.
 * This is required for the flush to happen correctly, but there is a slight
 * complication as this is applied in a WA batch where the values are only
 * initialized once, so we cannot take the register value at the beginning and
 * reuse it further; hence we save its value to memory, upload a constant value
 * with bit 21 set and then restore it back with the saved value. To simplify
 * the WA, a constant value is formed by using the default value of this
 * register. This shouldn't be a problem because we are only modifying it for a
 * short period and this batch is non-preemptible. We could of course use
 * additional instructions that read the actual value of the register at that
 * time and set our bit of interest, but that makes the WA more complicated.
 *
 * This WA is also required for Gen9, so extracting it as a function avoids
 * code duplication.
 */
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
	/* NB no one else is allowed to scribble over scratch + 256! */
	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
	*batch++ = intel_gt_scratch_offset(engine->gt,
					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
	*batch++ = 0;

	*batch++ = MI_LOAD_REGISTER_IMM(1);
	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
	*batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;

	batch = gen8_emit_pipe_control(batch,
				       PIPE_CONTROL_CS_STALL |
				       PIPE_CONTROL_DC_FLUSH_ENABLE,
				       0);

	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
	*batch++ = intel_gt_scratch_offset(engine->gt,
					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
	*batch++ = 0;

	return batch;
}

/*
 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 * initialized at the beginning and shared across all contexts, but this field
 * helps us to have multiple batches at different offsets and select them based
 * on some criteria. At the moment this batch always starts at the beginning of
 * the page and we don't have multiple wa_ctx batch buffers.
 *
 * The number of WAs applied is not known at the beginning; we use this field
 * to return the number of DWORDs written.
 *
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END,
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to the perctx batch and both of them
 * together make a complete batch buffer.
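 *
 * Note that the indirect batch length programmed via CTX_RCS_INDIRECT_CTX
 * is expressed in cachelines, which is why the batch below is padded with
 * MI_NOOPs up to a CACHELINE_BYTES boundary.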
 */
static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
	/* WaDisableCtxRestoreArbitration:bdw,chv */
	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
	if (IS_BROADWELL(engine->i915))
		batch = gen8_emit_flush_coherentl3_wa(engine, batch);

	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
	/* Actual scratch location is at 128 bytes offset */
	batch = gen8_emit_pipe_control(batch,
				       PIPE_CONTROL_FLUSH_L3 |
				       PIPE_CONTROL_STORE_DATA_INDEX |
				       PIPE_CONTROL_CS_STALL |
				       PIPE_CONTROL_QW_WRITE,
				       LRC_PPHWSP_SCRATCH_ADDR);

	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	/* Pad to end of cacheline */
	while ((unsigned long)batch % CACHELINE_BYTES)
		*batch++ = MI_NOOP;

	/*
	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
	 * execution depends on the length specified in terms of cache lines
	 * in the register CTX_RCS_INDIRECT_CTX
	 */

	return batch;
}

struct lri {
	i915_reg_t reg;
	u32 value;
};

static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
{
	GEM_BUG_ON(!count || count > 63);

	*batch++ = MI_LOAD_REGISTER_IMM(count);
	do {
		*batch++ = i915_mmio_reg_offset(lri->reg);
		*batch++ = lri->value;
	} while (lri++, --count);
	*batch++ = MI_NOOP;

	return batch;
}

static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
	static const struct lri lri[] = {
		/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
		{
			COMMON_SLICE_CHICKEN2,
			__MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
				       0),
		},

		/* BSpec: 11391 */
		{
			FF_SLICE_CHICKEN,
			__MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
				       FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
		},

		/* BSpec: 11299 */
		{
			_3D_CHICKEN3,
			__MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
				       _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
		}
	};

	*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
	batch = gen8_emit_flush_coherentl3_wa(engine, batch);

	/* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
	batch = gen8_emit_pipe_control(batch,
				       PIPE_CONTROL_FLUSH_L3 |
				       PIPE_CONTROL_STORE_DATA_INDEX |
				       PIPE_CONTROL_CS_STALL |
				       PIPE_CONTROL_QW_WRITE,
				       LRC_PPHWSP_SCRATCH_ADDR);

	batch = emit_lri(batch, lri, ARRAY_SIZE(lri));

	/* WaMediaPoolStateCmdInWABB:bxt,glk */
	if (HAS_POOLED_EU(engine->i915)) {
		/*
		 * EU pool configuration is set up along with the golden context
		 * during context initialization. This value depends on
		 * device type (2x6 or 3x6) and needs to be updated based
		 * on which subslice is disabled, especially for 2x6
		 * devices; however, it is safe to load the default
		 * configuration of a 3x6 device instead of masking off
		 * the corresponding bits because HW ignores bits of a disabled
		 * subslice and drops down to the appropriate config. Please
		 * see render_state_setup() in i915_gem_render_state.c for
		 * possible configurations; to avoid duplication they are
		 * not shown here again.
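		 * (The default 3x6 pool configuration corresponds to the
		 * 0x00777000 value programmed below.)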
		 */
		*batch++ = GEN9_MEDIA_POOL_STATE;
		*batch++ = GEN9_MEDIA_POOL_ENABLE;
		*batch++ = 0x00777000;
		*batch++ = 0;
		*batch++ = 0;
		*batch++ = 0;
	}

	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	/* Pad to end of cacheline */
	while ((unsigned long)batch % CACHELINE_BYTES)
		*batch++ = MI_NOOP;

	return batch;
}

#define CTX_WA_BB_SIZE (PAGE_SIZE)

static int lrc_create_wa_ctx(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	engine->wa_ctx.vma = vma;
	return 0;

err:
	i915_gem_object_put(obj);
	return err;
}

void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
}

typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);

void lrc_init_wa_ctx(struct intel_engine_cs *engine)
{
	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
	struct i915_wa_ctx_bb *wa_bb[] = {
		&wa_ctx->indirect_ctx, &wa_ctx->per_ctx
	};
	wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
	struct i915_gem_ww_ctx ww;
	void *batch, *batch_ptr;
	unsigned int i;
	int err;

	if (GRAPHICS_VER(engine->i915) >= 11 ||
	    !(engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
		return;

	if (GRAPHICS_VER(engine->i915) == 9) {
		wa_bb_fn[0] = gen9_init_indirectctx_bb;
		wa_bb_fn[1] = NULL;
	} else if (GRAPHICS_VER(engine->i915) == 8) {
		wa_bb_fn[0] = gen8_init_indirectctx_bb;
		wa_bb_fn[1] = NULL;
	}

	err = lrc_create_wa_ctx(engine);
	if (err) {
		/*
		 * We continue even if we fail to initialize the WA batch
		 * because we only expect rare glitches but nothing
		 * critical to prevent us from using the GPU.
		 */
		drm_err(&engine->i915->drm,
			"Ignoring context switch w/a allocation error:%d\n",
			err);
		return;
	}

	if (!engine->wa_ctx.vma)
		return;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(wa_ctx->vma->obj, &ww);
	if (!err)
		err = i915_ggtt_pin(wa_ctx->vma, &ww, 0, PIN_HIGH);
	if (err)
		goto err;

	batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unpin;
	}

	/*
	 * Emit the two workaround batch buffers, recording the offset from the
	 * start of the workaround batch buffer object for each and their
	 * respective sizes.
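	 * A NULL entry in wa_bb_fn[] simply leaves the corresponding slot
	 * with a size of 0.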
	 */
	batch_ptr = batch;
	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
		wa_bb[i]->offset = batch_ptr - batch;
		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
						  CACHELINE_BYTES))) {
			err = -EINVAL;
			break;
		}
		if (wa_bb_fn[i])
			batch_ptr = wa_bb_fn[i](engine, batch_ptr);
		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
	}
	GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);

	__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
	__i915_gem_object_release_map(wa_ctx->vma->obj);

	/* Verify that we can handle failure to setup the wa_ctx */
	if (!err)
		err = i915_inject_probe_error(engine->i915, -ENODEV);

err_unpin:
	if (err)
		i915_vma_unpin(wa_ctx->vma);
err:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err) {
		i915_vma_put(engine->wa_ctx.vma);

		/* Clear all flags to prevent further use */
		memset(wa_ctx, 0, sizeof(*wa_ctx));
	}
}

static void st_runtime_underflow(struct intel_context_stats *stats, s32 dt)
{
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
	stats->runtime.num_underflow++;
	stats->runtime.max_underflow =
		max_t(u32, stats->runtime.max_underflow, -dt);
#endif
}

static u32 lrc_get_runtime(const struct intel_context *ce)
{
	/*
	 * We can use either ppHWSP[16] which is recorded before the context
	 * switch (and so excludes the cost of context switches) or use the
	 * value from the context image itself, which is saved/restored earlier
	 * and so includes the cost of the save.
	 */
	return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
}

void lrc_update_runtime(struct intel_context *ce)
{
	struct intel_context_stats *stats = &ce->stats;
	u32 old;
	s32 dt;

	old = stats->runtime.last;
	stats->runtime.last = lrc_get_runtime(ce);
	dt = stats->runtime.last - old;
	if (!dt)
		return;

	if (unlikely(dt < 0)) {
		CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
			 old, stats->runtime.last, dt);
		st_runtime_underflow(stats, dt);
		return;
	}

	ewma_runtime_add(&stats->runtime.avg, dt);
	stats->runtime.total += dt;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif