Lines Matching defs:ee

440 				 const struct intel_engine_coredump *ee)
447 ee->instdone.instdone);
449 if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
453 ee->instdone.slice_common);
458 for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
461 ee->instdone.sampler[slice][subslice]);
463 for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
466 ee->instdone.row[slice][subslice]);
472 for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
475 ee->instdone.geom_svg[slice][subslice]);
479 ee->instdone.slice_common_extra[0]);
481 ee->instdone.slice_common_extra[1]);
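The SAMPLER/ROW/GEOM_SVG loops above emit one INSTDONE word per (slice, subslice) steering target. A minimal userspace sketch of that dump shape, with hypothetical bounds standing in for the driver's for_each_ss_steering() iterator:

    #include <stdio.h>

    #define MAX_SLICES 4      /* hypothetical bounds, not the hardware's */
    #define MAX_SUBSLICES 8

    /* One register value per (slice, subslice) pair, formatted like the
     * SAMPLER_INSTDONE lines the driver prints. */
    static void print_ss_instdone(const unsigned int v[MAX_SLICES][MAX_SUBSLICES])
    {
        for (int s = 0; s < MAX_SLICES; s++)
            for (int ss = 0; ss < MAX_SUBSLICES; ss++)
                printf("  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
                       s, ss, v[s][ss]);
    }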
525 intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
527 return __find_vma(ee->vma, "batch");
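intel_gpu_error_find_batch() is a name lookup over the coredump's singly linked VMA chain. A self-contained sketch of the same walk; struct vma_dump is a hypothetical stand-in, not the driver's struct i915_vma_coredump:

    #include <stddef.h>
    #include <string.h>

    struct vma_dump {
        const char *name;
        struct vma_dump *next;
    };

    /* Return the first node whose name matches, or NULL. */
    static struct vma_dump *find_vma(struct vma_dump *vma, const char *name)
    {
        for (; vma; vma = vma->next)
            if (!strcmp(vma->name, name))
                return vma;
        return NULL;
    }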
531 const struct intel_engine_coredump *ee)
536 err_printf(m, "%s command stream:\n", ee->engine->name);
537 err_printf(m, " CCID: 0x%08x\n", ee->ccid);
538 err_printf(m, " START: 0x%08x\n", ee->start);
539 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
541 ee->tail, ee->rq_post, ee->rq_tail);
542 err_printf(m, " CTL: 0x%08x\n", ee->ctl);
543 err_printf(m, " MODE: 0x%08x\n", ee->mode);
544 err_printf(m, " HWS: 0x%08x\n", ee->hws);
546 (u32)(ee->acthd>>32), (u32)ee->acthd);
547 err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
548 err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
549 err_printf(m, " ESR: 0x%08x\n", ee->esr);
551 error_print_instdone(m, ee);
553 batch = intel_gpu_error_find_batch(ee);
564 (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
565 err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
566 err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
568 err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
569 err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
570 lower_32_bits(ee->faddr));
572 err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
573 err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
576 err_printf(m, " NOPID: 0x%08x\n", ee->nopid);
577 err_printf(m, " EXCC: 0x%08x\n", ee->excc);
578 err_printf(m, " CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
579 err_printf(m, " CSCMDOP: 0x%08x\n", ee->cscmdop);
580 err_printf(m, " CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
581 err_printf(m, " DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
582 err_printf(m, " DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
585 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
591 i, ee->vm_info.pdp[i]);
594 ee->vm_info.pp_dir_base);
598 for (n = 0; n < ee->num_ports; n++) {
600 error_print_request(m, " ", &ee->execlist[n]);
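Note the two idioms used above for printing a 64-bit register as two hex words: open-coded shift-and-cast for ACTHD and BBADDR, and the kernel's upper_32_bits()/lower_32_bits() helpers for FADDR. Both produce the same output; a standalone sketch (the helper macros here are simplified re-implementations, not the kernel's exact definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define upper_32_bits(n) ((uint32_t)((n) >> 32))
    #define lower_32_bits(n) ((uint32_t)(n))

    int main(void)
    {
        uint64_t addr = 0x00000001deadbeefULL;

        /* Open-coded, as in the ACTHD/BBADDR lines. */
        printf("  ACTHD: 0x%08x %08x\n",
               (uint32_t)(addr >> 32), (uint32_t)addr);
        /* Helper form, as in the FADDR lines. */
        printf("  FADDR: 0x%08x %08x\n",
               upper_32_bits(addr), lower_32_bits(addr));
        return 0;
    }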
800 const struct intel_engine_coredump *ee;
802 for (ee = gt->engine; ee; ee = ee->next) {
806 if (ee->guc_capture_node)
807 intel_guc_capture_print_engine_node(m, ee);
810 ee->engine->name);
812 error_print_engine(m, ee);
815 err_printf(m, " hung: %u\n", ee->hung);
816 err_printf(m, " engine reset count: %u\n", ee->reset_count);
817 error_print_context(m, " Active context: ", &ee->context);
819 for (vma = ee->vma; vma; vma = vma->next)
820 intel_gpu_error_print_vma(m, ee->engine, vma);
829 const struct intel_engine_coredump *ee;
849 for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
851 ee->engine->name,
852 ee->context.comm,
853 ee->context.pid);
1035 struct intel_engine_coredump *ee = gt->engine;
1037 gt->engine = ee->next;
1039 i915_vma_coredump_free(ee->vma);
1040 intel_guc_capture_free_node(ee);
1041 kfree(ee);
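The teardown at 1035-1041 pops each node off the head before freeing it, so the list head stays consistent at every step. A minimal sketch of that pop-and-free order, with a hypothetical node type:

    #include <stdlib.h>

    struct engine_dump {            /* hypothetical stand-in node */
        struct engine_dump *next;
    };

    static void free_engines(struct engine_dump **head)
    {
        while (*head) {
            struct engine_dump *ee = *head;

            *head = ee->next;       /* detach first... */
            free(ee);               /* ...then release */
        }
    }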
1217 static void engine_record_registers(struct intel_engine_coredump *ee)
1219 const struct intel_engine_cs *engine = ee->engine;
1223 ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
1231 ee->fault_reg = intel_uncore_read(engine->uncore,
1234 ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
1237 ee->fault_reg = intel_uncore_read(engine->uncore,
1240 ee->fault_reg = intel_uncore_read(engine->uncore,
1243 ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
1247 ee->esr = ENGINE_READ(engine, RING_ESR);
1248 ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
1249 ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
1250 ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
1251 ee->instps = ENGINE_READ(engine, RING_INSTPS);
1252 ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
1253 ee->ccid = ENGINE_READ(engine, CCID);
1255 ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
1256 ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
1258 ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
1260 ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
1261 ee->ipeir = ENGINE_READ(engine, IPEIR);
1262 ee->ipehr = ENGINE_READ(engine, IPEHR);
1266 ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
1267 ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
1268 ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
1269 ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
1270 ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
1271 ee->nopid = ENGINE_READ(engine, RING_NOPID);
1272 ee->excc = ENGINE_READ(engine, RING_EXCC);
1275 intel_engine_get_instdone(engine, &ee->instdone);
1277 ee->instpm = ENGINE_READ(engine, RING_INSTPM);
1278 ee->acthd = intel_engine_get_active_head(engine);
1279 ee->start = ENGINE_READ(engine, RING_START);
1280 ee->head = ENGINE_READ(engine, RING_HEAD);
1281 ee->tail = ENGINE_READ(engine, RING_TAIL);
1282 ee->ctl = ENGINE_READ(engine, RING_CTL);
1284 ee->mode = ENGINE_READ(engine, RING_MI_MODE);
1314 ee->hws = intel_uncore_read(engine->uncore, mmio);
1317 ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
1322 ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
1325 ee->vm_info.pp_dir_base =
1328 ee->vm_info.pp_dir_base =
1334 ee->vm_info.pdp[i] =
1337 ee->vm_info.pdp[i] <<= 32;
1338 ee->vm_info.pdp[i] |=
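Two variants of assembling a 64-bit value from split registers appear in engine_record_registers(): OR-ing a shifted upper dword into a previously read lower dword (FADDR/BBADDR at 1255-1256) and shift-then-OR on the same field (the PDP reads at 1337-1338). A sketch of the first form; reg_read32() is a hypothetical stand-in for ENGINE_READ():

    #include <stdint.h>

    /* Stand-in for ENGINE_READ(): in the driver this is an MMIO read. */
    static uint32_t reg_read32(const uint32_t *regs, unsigned int idx)
    {
        return regs[idx];
    }

    static uint64_t read_addr64(const uint32_t *regs,
                                unsigned int lo, unsigned int hi)
    {
        uint64_t addr = reg_read32(regs, lo);

        /* Cast before the shift; shifting a 32-bit value by 32 is UB. */
        addr |= (uint64_t)reg_read32(regs, hi) << 32;
        return addr;
    }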
1368 static void engine_record_execlists(struct intel_engine_coredump *ee)
1370 const struct intel_engine_execlists * const el = &ee->engine->execlists;
1375 record_request(*port++, &ee->execlist[n++]);
1377 ee->num_ports = n;
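engine_record_execlists() snapshots however many execlist ports are active into a fixed array and stores the count in ee->num_ports, which the dump loop at 598-600 later uses as its bound. A sketch of that bounded copy, with hypothetical types and port count:

    #include <stddef.h>

    #define NUM_PORTS 2                 /* hypothetical port count */

    struct req_dump { unsigned int seqno; };

    /* Copy each populated slot (NULL-terminated scan), return the count. */
    static size_t record_ports(struct req_dump *const *active,
                               struct req_dump out[NUM_PORTS])
    {
        size_t n = 0;

        while (n < NUM_PORTS && active[n]) {
            out[n] = *active[n];
            n++;
        }
        return n;
    }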
1487 static void add_vma(struct intel_engine_coredump *ee,
1491 vma->next = ee->vma;
1492 ee->vma = vma;
1517 static void add_vma_coredump(struct intel_engine_coredump *ee,
1523 add_vma(ee, create_vma_coredump(gt, vma, name, compress));
1529 struct intel_engine_coredump *ee;
1531 ee = kzalloc(sizeof(*ee), gfp);
1532 if (!ee)
1535 ee->engine = engine;
1538 engine_record_registers(ee);
1539 engine_record_execlists(ee);
1542 return ee;
1546 engine_coredump_add_context(struct intel_engine_coredump *ee,
1552 ee->simulated |= record_context(&ee->context, ce);
1553 if (ee->simulated)
1568 intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
1574 vma = engine_coredump_add_context(ee, rq->context, gfp);
1586 ee->rq_head = rq->head;
1587 ee->rq_post = rq->postfix;
1588 ee->rq_tail = rq->tail;
1594 intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
1598 const struct intel_engine_cs *engine = ee->engine;
1604 add_vma(ee,
1615 add_vma_coredump(ee, engine->gt, engine->status_page.vma,
1618 add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
1628 struct intel_engine_coredump *ee;
1632 ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
1633 if (!ee)
1654 capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
1657 capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
1661 intel_engine_coredump_add_vma(ee, capture, compress);
1664 intel_guc_capture_get_matching_node(engine->gt, ee, ce);
1666 kfree(ee);
1667 ee = NULL;
1670 return ee;
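capture_engine() builds the dump incrementally and unwinds completely on failure: if neither a request nor a context capture succeeds, the partially filled ee is freed and NULL is returned (1666-1667), so callers never see a half-captured node. A compressed sketch of that shape; add_request() here is a hypothetical stand-in for the capture steps:

    #include <stdbool.h>
    #include <stdlib.h>

    struct engine_dump { int placeholder; };

    /* Hypothetical capture step standing in for
     * intel_engine_coredump_add_request() and friends. */
    static bool add_request(struct engine_dump *ee) { (void)ee; return true; }

    static struct engine_dump *capture(void)
    {
        struct engine_dump *ee = calloc(1, sizeof(*ee));

        if (!ee)
            return NULL;
        if (!add_request(ee)) {
            free(ee);       /* unwind: nothing half-captured escapes */
            return NULL;
        }
        return ee;
    }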
1683 struct intel_engine_coredump *ee;
1688 ee = capture_engine(engine, compress, dump_flags);
1689 if (!ee)
1692 ee->hung = engine->mask & engine_mask;
1694 gt->simulated |= ee->simulated;
1695 if (ee->simulated) {
1697 intel_guc_capture_free_node(ee);
1698 kfree(ee);
1702 ee->next = gt->engine;
1703 gt->engine = ee;
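gt_record_engines() discards simulated captures (freeing the GuC node and the dump) and pushes every surviving dump onto the head of gt->engine, which is why the print and free loops elsewhere walk ee->next starting from gt->engine. A sketch of that filter-then-prepend, with a hypothetical node type:

    #include <stdbool.h>
    #include <stdlib.h>

    struct engine_dump {
        struct engine_dump *next;
        bool simulated;
    };

    static void link_dump(struct engine_dump **head, struct engine_dump *ee)
    {
        if (ee->simulated) {
            free(ee);               /* discard, as at 1697-1698 */
            return;
        }
        ee->next = *head;           /* prepend, as at 1702-1703 */
        *head = ee;
    }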
1915 static u32 generate_ecode(const struct intel_engine_coredump *ee)
1923 return ee ? ee->ipehr ^ ee->instdone.instdone : 0;