/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_sysfs.h"
#include "i915_utils.h"

#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}
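
/*
 * Note: the printf helpers below append into e->buf and only push a full
 * chunk into the scatterlist (see __i915_error_grow() above); __sg_set_buf()
 * reuses sg->dma_address to remember the logical file offset of each chunk,
 * which i915_gpu_coredump_copy_to_buffer() later uses to seek. A sketch of
 * the intended calling pattern (illustrative only):
 *
 *	err_printf(e, "EIR: 0x%08x\n", eir);	// grows e->buf on demand
 *	err_puts(e, "capture complete\n");	// cheap append, no formatting
 *
 * Any allocation or formatting failure is latched in e->err and subsequent
 * calls become no-ops, so callers need only check for an error once, at the
 * very end.
 */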

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct folio_batch *fbatch)
{
	folio_batch_release(fbatch);
}

static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
	while (folio_batch_space(fbatch)) {
		struct folio *folio;

		folio = folio_alloc(gfp, 0);
		if (!folio)
			return -ENOMEM;

		folio_batch_add(fbatch, folio);
	}

	return 0;
}

static int pool_init(struct folio_batch *fbatch, gfp_t gfp)
{
	int err;

	folio_batch_init(fbatch);

	err = pool_refill(fbatch, gfp);
	if (err)
		pool_fini(fbatch);

	return err;
}

static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
{
	struct folio *folio;

	folio = folio_alloc(gfp, 0);
	if (!folio && folio_batch_count(fbatch))
		folio = fbatch->folios[--fbatch->nr];

	return folio ? folio_address(folio) : NULL;
}

static void pool_free(struct folio_batch *fbatch, void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (folio_batch_space(fbatch))
		folio_batch_add(fbatch, folio);
	else
		folio_put(folio);
}
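
/*
 * Typical pool lifecycle (an illustrative sketch only):
 *
 *	struct folio_batch pool;
 *
 *	pool_init(&pool, ALLOW_FAIL);	// pre-fill the emergency stash
 *	...
 *	void *addr = pool_alloc(&pool, ATOMIC_MAYFAIL);
 *	...
 *	pool_free(&pool, addr);		// returns the page to the stash
 *	pool_fini(&pool);
 *
 * pool_alloc() only dips into the stash when the direct allocation fails,
 * so the reserve is preserved for genuinely atomic/low-memory moments.
 */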

#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct folio_batch pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct folio_batch pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif
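
/*
 * Expected call order for either implementation above (a summary sketch;
 * see i915_vma_coredump_create() and i915_vma_capture_prepare() for the
 * real users):
 *
 *	compress_init()		once per capture
 *	  compress_start()	once per VMA
 *	    compress_page()	once per captured page
 *	  compress_flush()	terminate the stream (no-op without zlib)
 *	  compress_finish()
 *	compress_fini()		once per capture
 *
 * The ':' vs '~' marker emitted by err_compression_marker() tells userspace
 * decoders whether the ascii85 payload that follows is zlib-deflated or raw.
 */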

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	int slice;
	int subslice;
	int iter;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
			err_printf(m, "  GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime, ctx->avg_runtime);
	err_printf(m, "  context timeline seqno %u\n", ctx->hwsp_seqno);
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

static struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}
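
/*
 * For reference, the per-engine section emitted below looks roughly like
 * this in the dump (values purely illustrative, not from a real hang):
 *
 *	rcs0 command stream:
 *	  CCID:  0x00000000
 *	  START: 0x00001000
 *	  HEAD:  0x00000358 [0x00000340]
 *	  ...
 */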

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR:   0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = intel_gpu_error_find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (GRAPHICS_VER(m->i915) >= 11) {
		err_printf(m, "  NOPID: 0x%08x\n", ee->nopid);
		err_printf(m, "  EXCC: 0x%08x\n", ee->excc);
		err_printf(m, "  CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
		err_printf(m, "  CSCMDOP: 0x%08x\n", ee->cscmdop);
		err_printf(m, "  CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
		err_printf(m, "  DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
		err_printf(m, "  DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
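
/*
 * Each captured VMA is emitted as a one-line header followed by the page
 * contents, ascii85-encoded (see <linux/ascii85.h>) and prefixed by the
 * compression marker. The final page's trailing vma->unused bytes are
 * dropped before encoding so the decoded size matches what was captured.
 */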

static void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
				      const struct intel_engine_cs *engine,
				      const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print(&error->device_info, &error->runtime_info, &p);
	intel_display_device_info_print(&error->display_device_info,
					&error->display_runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
	intel_display_params_dump(m->i915, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
			      const char *name,
			      const struct intel_ctb_coredump *ctb)
{
	if (!ctb->size)
		return;

	err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
		   name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
		   ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
	err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
	err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
	err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}

static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}

static void err_print_gt_display(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
}

static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
				       struct intel_gt_coredump *gt)
{
	int i;

	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
	err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);

	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
}

static void err_print_gt_global(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (GRAPHICS_VER(m->i915) == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		int i;

		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}
}

static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	int i;

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);
}

static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		if (gt->uc && gt->uc->guc.is_guc_capture) {
			if (ee->guc_capture_node)
				intel_guc_capture_print_engine_node(m, ee);
			else
				err_printf(m, "  Missing GuC capture node for %s\n",
					   ee->engine->name);
		} else {
			error_print_engine(m, ee);
		}

		err_printf(m, "  hung: %u\n", ee->hung);
		err_printf(m, "  engine reset count: %u\n", ee->reset_count);
		error_print_context(m, "  Active context: ", &ee->context);

		for (vma = ee->vma; vma; vma = vma->next)
			intel_gpu_error_print_vma(m, ee->engine, vma);
	}
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	intel_dmc_print_error_state(m, m->i915);

	err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
	err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));

	if (error->gt) {
		bool print_guc_capture = false;

		if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
			print_guc_capture = true;

		err_print_gt_display(m, error->gt);
		err_print_gt_global_nonguc(m, error->gt);
		err_print_gt_fences(m, error->gt);

		/*
		 * GuC dumped global, eng-class and eng-instance registers together
		 * as part of engine state dump so we print in err_print_gt_engines
		 */
		if (!print_guc_capture)
			err_print_gt_global(m, error->gt);

		err_print_gt_engines(m, error->gt);

		if (error->gt->uc)
			err_print_uc(m, error->gt->uc);

		err_print_gt_info(m, error->gt);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}
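
/*
 * Hypothetical read loop, as a sysfs/debugfs show routine might drive it
 * (a sketch only; the real consumers are the sysfs/debugfs error nodes):
 *
 *	ssize_t n;
 *	loff_t off = 0;
 *
 *	while ((n = i915_gpu_coredump_copy_to_buffer(error, buf, off,
 *						     PAGE_SIZE)) > 0)
 *		off += n;	// consume buf[0..n) here
 *
 * error->fit caches the last scatterlist position so that sequential reads
 * resume in O(1) instead of rescanning the chain from the head each time.
 */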

ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
	intel_display_params_free(&error->display_params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.file_selected.path);
	kfree(uc->huc_fw.file_selected.path);
	kfree(uc->guc_fw.file_wanted.path);
	kfree(uc->huc_fw.file_wanted.path);
	i915_vma_coredump_free(uc->guc.vma_log);
	i915_vma_coredump_free(uc->guc.vma_ctb);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		intel_guc_capture_free_node(ee);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}
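
/*
 * i915_vma_coredump_create() below snapshots a VMA's backing pages through
 * one of three paths, in order of preference:
 *
 *  1. via the reserved GGTT error-capture slot (works for any backing
 *     store; one page is remapped at a time under ggtt->error_mutex),
 *  2. via a direct io_mapping of local memory for lmem objects, or
 *  3. via kmap_local_page() + clflush for system memory pages.
 *
 * All three funnel each page through compress_page().
 */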

static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_resource *vma_res,
			 struct i915_vma_compress *compress,
			 const char *name)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma_res || !vma_res->bi.pages || !compress)
		return NULL;

	dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma_res->start;
	dst->gtt_size = vma_res->node_size;
	dst->gtt_page_sizes = vma_res->page_sizes_gtt;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 i915_gem_get_pat_index(gt->i915,
										I915_CACHE_NONE),
							 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     i915_gem_get_pat_index(gt->i915,
									    I915_CACHE_NONE),
						     0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vma_res->bi.lmem) {
		struct intel_memory_region *mem = vma_res->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > resource_size(&mem->io)) {
				ret = -EINVAL;
				break;
			}

			s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma_res->bi.pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap_local_page(page);
			ret = compress_page(compress, s, dst, false);
			kunmap_local(s);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (GRAPHICS_VER(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (GRAPHICS_VER(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		/*
		 * For the media GT, this ring fault register is not replicated,
		 * so don't do multicast/replicated register read/write
		 * operation on it.
		 */
		if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  XELPMP_RING_FAULT_REG);
		else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
			ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
							      XEHP_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (GRAPHICS_VER(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (GRAPHICS_VER(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	if (GRAPHICS_VER(i915) >= 11) {
		ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
		ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
		ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
		ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
		ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
		ee->nopid = ENGINE_READ(engine, RING_NOPID);
		ee->excc = ENGINE_READ(engine, RING_EXCC);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (GRAPHICS_VER(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (GRAPHICS_VER(i915) == 7) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (GRAPHICS_VER(engine->i915) == 6) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (GRAPHICS_VER(i915) == 6) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (GRAPHICS_VER(i915) == 7) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (GRAPHICS_VER(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

static bool record_context(struct i915_gem_context_coredump *e,
			   struct intel_context *ce)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(ce->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);
	e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
				*ce->timeline->hwsp_seqno : ~0U;

	e->total_runtime = intel_context_get_total_runtime_ns(ce);
	e->avg_runtime = intel_context_get_avg_runtime_ns(ce);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma_resource *vma_res;
	char name[16];
	bool lockdep_cookie;
};

static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
		     struct i915_vma_resource *vma_res,
		     gfp_t gfp, const char *name)
{
	struct intel_engine_capture_vma *c;

	if (!vma_res)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma_res = i915_vma_resource_get(vma_res);

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	if (!vma)
		return next;

	/*
	 * If the vma isn't pinned, then the vma should be snapshotted
	 * to a struct i915_vma_snapshot at command submission time.
	 * Not here.
	 */
	if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
		return next;

	next = capture_vma_snapshot(next, vma->resource, gfp, name);

	return next;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma_snapshot(capture, c->vma_res, gfp,
					       "user");

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
		    const char *name, struct i915_vma_compress *compress)
{
	struct i915_vma_coredump *ret = NULL;
	struct i915_vma_resource *vma_res;
	bool lockdep_cookie;

	if (!vma)
		return NULL;

	vma_res = vma->resource;

	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
		ret = i915_vma_coredump_create(gt, vma_res, compress, name);
		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}

	return ret;
}

static void add_vma_coredump(struct intel_engine_coredump *ee,
			     const struct intel_gt *gt,
			     struct i915_vma *vma,
			     const char *name,
			     struct i915_vma_compress *compress)
{
	add_vma(ee, create_vma_coredump(gt, vma, name, compress));
}

struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
		engine_record_registers(ee);
		engine_record_execlists(ee);
	}

	return ee;
}

static struct intel_engine_capture_vma *
engine_coredump_add_context(struct intel_engine_coredump *ee,
			    struct intel_context *ce,
			    gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, ce);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
	vma = capture_vma(vma, ce->state, "HW context", gfp);

	return vma;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma;

	vma = engine_coredump_add_context(ee, rq->context, gfp);
	if (!vma)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
	vma = capture_user(vma, rq, gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma_resource *vma_res = this->vma_res;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt, vma_res,
						 compress, this->name));

		i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
		i915_vma_resource_put(vma_res);

		capture = this->next;
		kfree(this);
	}

	add_vma_coredump(ee, engine->gt, engine->status_page.vma,
			 "HW Status", compress);

	add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
			 "WA context", compress);
}

static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress,
	       u32 dump_flags)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct intel_context *ce = NULL;
	struct i915_request *rq = NULL;

	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
	if (!ee)
		return NULL;

	intel_engine_get_hung_entity(engine, &ce, &rq);
	if (rq && !i915_request_started(rq))
		drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
			 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);

	if (rq) {
		capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
		i915_request_put(rq);
	} else if (ce) {
		capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
	}

	if (capture) {
		intel_engine_coredump_add_vma(ee, capture, compress);

		if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
			intel_guc_capture_get_matching_node(engine->gt, ee, ce);
	} else {
		kfree(ee);
		ee = NULL;
	}

	return ee;
}

static void
gt_record_engines(struct intel_gt_coredump *gt,
		  intel_engine_mask_t engine_mask,
		  struct i915_vma_compress *compress,
		  u32 dump_flags)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress, dump_flags);
		if (!ee)
			continue;

		ee->hung = engine->mask & engine_mask;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
				intel_guc_capture_free_node(ee);
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}

static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
			      const struct intel_guc_ct_buffer *ctb,
			      const void *blob_ptr, struct intel_guc *guc)
{
	if (!ctb || !ctb->desc)
		return;

	saved->raw_status = ctb->desc->status;
	saved->raw_head = ctb->desc->head;
	saved->raw_tail = ctb->desc->tail;
	saved->head = ctb->head;
	saved->tail = ctb->tail;
	saved->size = ctb->size;
	saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
	saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
}

static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
	     struct i915_vma_compress *compress)
{
	const struct intel_uc *uc = &gt->_gt->uc;
	struct intel_uc_coredump *error_uc;

	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
	if (!error_uc)
		return NULL;

	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));

	error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
	error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
	error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
	error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);

	/*
	 * Save the GuC log and include a timestamp reference for converting the
	 * log times to system times (in conjunction with the error->boottime and
	 * gt->clock_frequency fields saved elsewhere).
	 */
	error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
	error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
						    "GuC log buffer", compress);
	error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
						    "GuC CT buffer", compress);
	error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
	gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
	gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);

	return error_uc;
}
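
/*
 * The GUCPMTIMESTAMP sample saved above is a raw counter; userspace tooling
 * can convert deltas to wall time roughly as (an illustrative formula, not
 * used by the kernel itself):
 *
 *	delta_seconds ~= (ts_now - ts_then) / gt->clock_frequency
 *
 * anchored against error->boottime, per the comment in gt_record_uc().
 */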

/* Capture display registers. */
static void gt_record_display_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;

	if (DISPLAY_VER(i915) >= 6 && DISPLAY_VER(i915) < 20)
		gt->derrmr = intel_uncore_read(uncore, DERRMR);

	if (GRAPHICS_VER(i915) >= 8)
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
	else if (IS_VALLEYVIEW(i915))
		gt->ier = intel_uncore_read(uncore, VLV_IER);
	else if (HAS_PCH_SPLIT(i915))
		gt->ier = intel_uncore_read(uncore, DEIER);
	else if (GRAPHICS_VER(i915) == 2)
		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
	else
		gt->ier = intel_uncore_read(uncore, GEN2_IER);
}

/* Capture all other registers that GuC doesn't capture. */
static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	if (IS_VALLEYVIEW(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	} else if (GRAPHICS_VER(i915) >= 11) {
		gt->gtier[0] =
			intel_uncore_read(uncore,
					  GEN11_RENDER_COPY_INTR_ENABLE);
		gt->gtier[1] =
			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
		gt->gtier[2] =
			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
		gt->gtier[3] =
			intel_uncore_read(uncore,
					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		gt->gtier[4] =
			intel_uncore_read(uncore,
					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
		gt->gtier[5] =
			intel_uncore_read(uncore,
					  GEN11_GUNIT_CSME_INTR_ENABLE);
		gt->ngtier = 6;
	} else if (GRAPHICS_VER(i915) >= 8) {
		for (i = 0; i < 4; i++)
			gt->gtier[i] =
				intel_uncore_read(uncore, GEN8_GT_IER(i));
		gt->ngtier = 4;
	} else if (HAS_PCH_SPLIT(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	}

	gt->eir = intel_uncore_read(uncore, EIR);
	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}

/*
 * Capture all registers that relate to workload submission.
 * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us
 */
static void gt_record_global_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	/*
	 * General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(i915))
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);

	if (GRAPHICS_VER(i915) == 7)
		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
							XEHP_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
							XEHP_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 12) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 8) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA1);
	}

	if (GRAPHICS_VER(i915) == 6) {
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (GRAPHICS_VER(i915) >= 7)
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);

	if (GRAPHICS_VER(i915) >= 6) {
		if (GRAPHICS_VER(i915) < 12) {
			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
		}
	}

	/* 3: Feature specific registers */
	if (IS_GRAPHICS_VER(i915, 6, 7)) {
		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
	}

	if (IS_GRAPHICS_VER(i915, 8, 11))
		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);

	if (GRAPHICS_VER(i915) == 12)
		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);

	if (GRAPHICS_VER(i915) >= 12) {
		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			gt->sfc_done[i] =
				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
		}

		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
	}
}

static void gt_record_info(struct intel_gt_coredump *gt)
{
	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
	gt->clock_frequency = gt->_gt->clock_frequency;
	gt->clock_period_ns = gt->_gt->clock_period_ns;
}
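
/*
 * An example of the resulting dmesg line (values illustrative only):
 *
 *	GPU HANG: ecode 12:1:85dffffb, in gnome-shell [1234]
 *
 * i.e. GRAPHICS_VER : mask of hung uabi engine classes : ecode, plus the
 * first hung process, as assembled by error_msg() below.
 */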

/*
 * Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it also catches some
	 * very common synchronization commands that almost always appear
	 * when the hang is strictly a client bug. Use instdone to
	 * differentiate those somewhat.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}

static const char *error_msg(struct i915_gpu_coredump *error)
{
	struct intel_engine_coredump *first = NULL;
	unsigned int hung_classes = 0;
	struct intel_gt_coredump *gt;
	int len;

	for (gt = error->gt; gt; gt = gt->next) {
		struct intel_engine_coredump *cs;

		for (cs = gt->engine; cs; cs = cs->next) {
			if (cs->hung) {
				hung_classes |= BIT(cs->engine->uabi_class);
				if (!first)
					first = cs;
			}
		}
	}

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%x:%08x",
			GRAPHICS_VER(error->i915), hung_classes,
			generate_ecode(first));
	if (first && first->context.pid) {
		/* Just show the first executing process, more is confusing */
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 first->context.comm, first->context.pid);
	}

	return error->error_msg;
}

static void capture_gen(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = pm_runtime_suspended(i915->drm.dev);

	error->iommu = i915_vtd_active(i915);
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	i915_params_copy(&error->params, &i915->params);
	intel_display_params_copy(&error->display_params);
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	memcpy(&error->display_device_info, DISPLAY_INFO(i915),
	       sizeof(error->display_device_info));
	memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915),
	       sizeof(error->display_runtime_info));
	error->driver_caps = i915->caps;
}

struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
	struct i915_gpu_coredump *error;

	if (!i915->params.error_capture)
		return NULL;

	error = kzalloc(sizeof(*error), gfp);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	error->time = ktime_get_real();
	error->boottime = ktime_get_boottime();
	error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
	error->capture = jiffies;

	capture_gen(error);

	return error;
}

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
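
/*
 * Broad shape of a capture, as driven by __i915_gpu_coredump() further
 * down (a summary only):
 *
 *	i915_gpu_coredump_alloc()	device-level state, params, caps
 *	  intel_gt_coredump_alloc()	GT registers (below)
 *	    gt_record_uc()		GuC/HuC firmware state and logs
 *	    gt_record_engines()		per-engine registers + VMA contents
 *
 * Nothing is formatted at capture time; the text form is generated lazily
 * in err_print_to_sgl() when the dump is first read.
 */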

struct intel_gt_coredump *
intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
{
	struct intel_gt_coredump *gc;

	gc = kzalloc(sizeof(*gc), gfp);
	if (!gc)
		return NULL;

	gc->_gt = gt;
	gc->awake = intel_gt_pm_is_awake(gt);

	gt_record_display_regs(gc);
	gt_record_global_nonguc_regs(gc);

	/*
	 * GuC dumps global, eng-class and eng-instance registers
	 * (that can change as part of engine state during execution)
	 * before an engine is reset due to a hung context.
	 * GuC captures and reports all three groups of registers
	 * together as a single set before the engine is reset.
	 * Thus, if GuC triggered the context reset we retrieve
	 * the register values as part of gt_record_engines.
	 */
	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
		gt_record_global_regs(gc);

	gt_record_fences(gc);

	return gc;
}

struct i915_vma_compress *
i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
	struct i915_vma_compress *compress;

	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
	if (!compress)
		return NULL;

	if (!compress_init(compress)) {
		kfree(compress);
		return NULL;
	}

	return compress;
}

void i915_vma_capture_finish(struct intel_gt_coredump *gt,
			     struct i915_vma_compress *compress)
{
	if (!compress)
		return;

	compress_fini(compress);
	kfree(compress);
}

static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
	if (!error)
		return ERR_PTR(-ENOMEM);

	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
	if (error->gt) {
		struct i915_vma_compress *compress;

		compress = i915_vma_capture_prepare(error->gt);
		if (!compress) {
			kfree(error->gt);
			kfree(error);
			return ERR_PTR(-ENOMEM);
		}

		if (INTEL_INFO(i915)->has_gt_uc) {
			error->gt->uc = gt_record_uc(error->gt, compress);
			if (error->gt->uc) {
				if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
					error->gt->uc->guc.is_guc_capture = true;
				else
					GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
			}
		}

		gt_record_info(error->gt);
		gt_record_engines(error->gt, engine_mask, compress, dump_flags);

		i915_vma_capture_finish(error->gt, compress);

		error->simulated |= error->gt->simulated;
	}

	error->overlay = intel_overlay_capture_error_state(i915);

	return error;
}

static struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	static DEFINE_MUTEX(capture_mutex);
	int ret = mutex_lock_interruptible(&capture_mutex);
	struct i915_gpu_coredump *dump;

	if (ret)
		return ERR_PTR(ret);

	dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
	mutex_unlock(&capture_mutex);

	return dump;
}
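
/*
 * Only a single error state is kept: first_error acts as a one-shot slot
 * that is claimed with cmpxchg() below and only vacated by an explicit
 * userspace clear (or driver teardown), so later hangs are dropped rather
 * than overwriting the first, usually most interesting, capture.
 */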

void i915_error_state_store(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915;
	static bool warned;

	if (IS_ERR_OR_NULL(error))
		return;

	i915 = error->i915;
	drm_info(&i915->drm, "%s\n", error_msg(error));

	if (error->simulated ||
	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
		return;

	i915_gpu_coredump_get(error);

	if (!xchg(&warned, true) &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
		pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n");
		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			i915->drm.primary->index);
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @gt: intel_gt which originated the hang
 * @engine_mask: hung engines
 * @dump_flags: dump flags
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct i915_gpu_coredump *error;

	error = i915_gpu_coredump(gt, engine_mask, dump_flags);
	if (IS_ERR(error)) {
		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
		return;
	}

	i915_error_state_store(error);
	i915_gpu_coredump_put(error);
}

static struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_put(error);
}

void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
}
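
/*
 * For CONFIG_DRM_I915_DEBUG_GEM builds, intel_klog_error_capture() below
 * also mirrors the dump into the kernel log. Each line is tagged so that a
 * post-processor can reassemble the dump: ">text<" wraps a complete line,
 * while "}text{" marks a fragment, produced either when no newline was
 * found in the buffered chunk or when a line had to be split at MAX_CHUNK
 * bytes to stay within a safe printk() length.
 */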

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_klog_error_capture(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask)
{
	static int g_count;
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;
	intel_wakeref_t wakeref;
	size_t buf_size = PAGE_SIZE * 128;
	size_t pos_err;
	char *buf, *ptr, *next;
	int l_count = g_count++;
	int line = 0;

	/* Can't allocate memory during a reset */
	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		drm_err(&gt->i915->drm, "[Capture/%d.%d] Inside GT reset, skipping error capture :(\n",
			l_count, line++);
		return;
	}

	error = READ_ONCE(i915->gpu_error.first_error);
	if (error) {
		drm_err(&i915->drm, "[Capture/%d.%d] Clearing existing error capture first...\n",
			l_count, line++);
		i915_reset_error_state(i915);
	}

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		error = i915_gpu_coredump(gt, engine_mask, CORE_DUMP_FLAG_NONE);

	if (IS_ERR(error)) {
		drm_err(&i915->drm, "[Capture/%d.%d] Failed to capture error state: %ld!\n",
			l_count, line++, PTR_ERR(error));
		return;
	}

	buf = kvmalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		drm_err(&i915->drm, "[Capture/%d.%d] Failed to allocate buffer for error capture!\n",
			l_count, line++);
		i915_gpu_coredump_put(error);
		return;
	}

	drm_info(&i915->drm, "[Capture/%d.%d] Dumping i915 error capture for %ps...\n",
		 l_count, line++, __builtin_return_address(0));

	/* Largest string length safe to print via dmesg */
#define MAX_CHUNK 800

	pos_err = 0;
	while (1) {
		ssize_t got = i915_gpu_coredump_copy_to_buffer(error, buf, pos_err, buf_size - 1);

		if (got <= 0)
			break;

		buf[got] = 0;
		pos_err += got;

		ptr = buf;
		while (got > 0) {
			size_t count;
			char tag[2];

			next = strnchr(ptr, got, '\n');
			if (next) {
				count = next - ptr;
				*next = 0;
				tag[0] = '>';
				tag[1] = '<';
			} else {
				count = got;
				tag[0] = '}';
				tag[1] = '{';
			}

			if (count > MAX_CHUNK) {
				size_t pos;
				char *ptr2 = ptr;

				for (pos = MAX_CHUNK; pos < count; pos += MAX_CHUNK) {
					char chr = ptr[pos];

					ptr[pos] = 0;
					drm_info(&i915->drm, "[Capture/%d.%d] }%s{\n",
						 l_count, line++, ptr2);
					ptr[pos] = chr;
					ptr2 = ptr + pos;

					/*
					 * If spewing large amounts of data via a serial console,
					 * this can be a very slow process. So be friendly and try
					 * not to cause 'softlockup on CPU' problems.
					 */
					cond_resched();
				}

				if (ptr2 < (ptr + count))
					drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
						 l_count, line++, tag[0], ptr2, tag[1]);
				else if (tag[0] == '>')
					drm_info(&i915->drm, "[Capture/%d.%d] ><\n",
						 l_count, line++);
			} else {
				drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
					 l_count, line++, tag[0], ptr, tag[1]);
			}

			ptr = next;
			got -= count;
			if (next) {
				ptr++;
				got--;
			}

			/* As above. */
			cond_resched();
		}

		if (got)
			drm_info(&i915->drm, "[Capture/%d.%d] Got %zd bytes remaining!\n",
				 l_count, line++, got);
	}

	kvfree(buf);

	drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
}
#endif
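
/*
 * What follows is the userspace-facing plumbing. The debugfs nodes
 * registered in i915_gpu_error_debugfs_register() typically appear under
 * /sys/kernel/debug/dri/<minor>/ (the exact path depends on where debugfs
 * is mounted): "i915_error_state" serves a previously stored capture,
 * while "i915_gpu_info" triggers a fresh capture on open, e.g.:
 *
 *   # cat /sys/kernel/debug/dri/0/i915_gpu_info
 */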

static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);

	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	drm_dbg(&error->i915->drm, "Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915,
			    &i915_error_state_fops);
	debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915,
			    &i915_gpu_info_fops);
}
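
/*
 * The sysfs counterpart exposes the stored dump as the "error" binary
 * attribute, i.e. /sys/class/drm/card<N>/error (the card number is system
 * dependent). An illustrative triage session:
 *
 *   # cat /sys/class/drm/card0/error > gpu-hang.txt    # save the dump
 *   # echo 1 > /sys/class/drm/card0/error              # clear it
 *
 * Any write clears the state; the written value itself is ignored.
 */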

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct i915_gpu_coredump *gpu;
	ssize_t ret = 0;

	/*
	 * FIXME: Concurrent clients triggering resets and reading + clearing
	 * dumps can cause inconsistent sysfs reads when a user calls in with a
	 * non-zero offset to complete a prior partial read but the
	 * gpu_coredump has been cleared or replaced.
	 */

	gpu = i915_first_error_state(i915);
	if (IS_ERR(gpu)) {
		ret = PTR_ERR(gpu);
	} else if (gpu) {
		ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
		i915_gpu_coredump_put(gpu);
	} else {
		const char *str = "No error state collected\n";
		size_t len = strlen(str);

		if (off < len) {
			ret = min_t(size_t, count, len - off);
			memcpy(buf, str + off, ret);
		}
	}

	return ret;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	drm_dbg(&dev_priv->drm, "Resetting error state\n");
	i915_reset_error_state(dev_priv);

	return count;
}

static const struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
{
	struct device *kdev = i915->drm.primary->kdev;

	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		drm_err(&i915->drm, "error_state sysfs setup failed\n");
}

void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
{
	struct device *kdev = i915->drm.primary->kdev;

	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}