Lines Matching +full:render +full:- +full:max
1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021-2024 Intel Corporation
45 * Book-keeping structure used to track read and write pointers
46 * as we extract error capture data from the GuC-log-buffer's
47 * error-capture region as a stream of dwords.
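A minimal sketch of such a bookkeeping structure, assuming only the fields that the extraction helpers further down dereference (buf->size, buf->data_offset, buf->rd, buf->wr); the name and the field widths here are illustrative:

struct example_capture_bufstate {
	u32 size;		/* size of the GuC-log error-capture region */
	u32 data_offset;	/* offset of that region within the log buffer */
	u32 rd;			/* read pointer: next byte the driver extracts */
	u32 wr;			/* write pointer: last byte GuC has produced */
};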
57 * struct __guc_capture_parsed_output - extracted error capture node
59 * A single unit of extracted error-capture output data grouped together
60 * at an engine-instance level. We keep these nodes in a linked list.
65 * A single set of 3 capture lists: a global-list
66 * an engine-class-list and an engine-instance list.
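Pieced together from the fields this file reads and writes later on, a hedged sketch of one such node (the exact layout and field widths are assumptions):

struct example_capture_parsed_output {
	struct list_head link;		/* sits on cachelist or outlist */
	bool is_partial;		/* GuC flagged the capture as partial */
	u32 eng_class, eng_inst;	/* engine class/instance that hung */
	u32 guc_id, lrca;		/* used to match an exec queue later */
	int locked;			/* owned by devcoredump, do not recycle */
	int source;			/* GuC-provided vs. manual capture */
	struct {
		u32 vfid;
		u32 num_regs;
		struct guc_mmio_reg *regs;	/* max_mmio_per_node entries */
	} reginfo[3];			/* global, engine-class, engine-instance */
};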
93 * For engine-registers, GuC only needs the register offsets
94 * from the engine-mmio-base
157 /* Render / Compute Per-Engine-Instance */
162 /* Render / Compute Engine-Class */
168 /* Render / Compute Engine-Class for xehpg */
173 /* Media Decode/Encode Per-Engine-Instance */
178 /* Video Enhancement Engine-Class */
183 /* Video Enhancement Per-Engine-Instance */
188 /* Blitter Per-Engine-Instance */
193 /* XE_LP - GSC Per-Engine-Instance */
255 "Render/Compute",
259 "GSC-Other",
273 * (slices or dual-sub-slices) and thus depends on HW fuses discovered
327 struct xe_guc *guc = &gt->uc.guc; in xe_guc_capture_get_reg_desc_list()
329 reglists = guc->capture->extlists; in xe_guc_capture_get_reg_desc_list()
360 ext->reg = XE_REG(extlist->reg.__reg.addr); in __fill_ext_reg()
361 ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1); in __fill_ext_reg()
362 ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); in __fill_ext_reg()
363 ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id); in __fill_ext_reg()
364 ext->dss_id = dss_id; in __fill_ext_reg()
365 ext->regname = extlist->name; in __fill_ext_reg()
376 return -ENOMEM; in __alloc_ext_regs()
378 newlist->list = list; in __alloc_ext_regs()
379 newlist->num_regs = num_regs; in __alloc_ext_regs()
380 newlist->owner = rootlist->owner; in __alloc_ext_regs()
381 newlist->engine = rootlist->engine; in __alloc_ext_regs()
382 newlist->type = rootlist->type; in __alloc_ext_regs()
402 const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists; in guc_capture_alloc_steered_lists()
407 struct drm_device *drm = &gt_to_xe(gt)->drm; in guc_capture_alloc_steered_lists()
419 if (xe_engine_class_to_guc_capture_class(hwe->class) == in guc_capture_alloc_steered_lists()
429 /* steered registers currently only exist for the render-class */ in guc_capture_alloc_steered_lists()
437 if (!list || guc->capture->extlists) in guc_capture_alloc_steered_lists()
440 total = bitmap_weight(gt->fuse_topo.g_dss_mask, sizeof(gt->fuse_topo.g_dss_mask) * 8) * in guc_capture_alloc_steered_lists()
456 /* For steering registers, the list is generated at run-time */ in guc_capture_alloc_steered_lists()
473 xe_gt_dbg(guc_to_gt(guc), "capture found %d ext-regs.\n", total); in guc_capture_alloc_steered_lists()
474 guc->capture->extlists = extlists; in guc_capture_alloc_steered_lists()
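The expansion being summed up here, as an assumed sketch (the DSS iterator and the steering lookup are hypothetical; __fill_ext_reg() and the fuse-mask weighting are from this file):

	/* one copy of every steered render/compute register per fused-in DSS */
	num_dss = bitmap_weight(gt->fuse_topo.g_dss_mask,
				sizeof(gt->fuse_topo.g_dss_mask) * 8);
	total   = num_dss * num_steered_regs;
	for_each_present_dss(dss, gt) {			/* hypothetical iterator */
		get_dss_steering(gt, dss, &group, &instance);	/* hypothetical lookup */
		for (i = 0; i < num_steered_regs; i++)
			__fill_ext_reg(extarray++, &steered_list[i], group, instance, dss);
	}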
483 const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists; in guc_capture_list_init()
484 struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists; in guc_capture_list_init()
489 return -ENODEV; in guc_capture_list_init()
493 return -ENODATA; in guc_capture_list_init()
495 list_num = match->num_regs; in guc_capture_list_init()
497 ptr[ptr_idx].offset = match->list[list_idx].reg.addr; in guc_capture_list_init()
499 ptr[ptr_idx].flags = match->list[list_idx].flags; in guc_capture_list_init()
500 ptr[ptr_idx].mask = match->list[list_idx].mask; in guc_capture_list_init()
506 ptr_idx < num_entries && list_idx < match->num_regs; in guc_capture_list_init()
508 ptr[ptr_idx].offset = match->list[list_idx].reg.addr; in guc_capture_list_init()
510 ptr[ptr_idx].flags = match->list[list_idx].flags; in guc_capture_list_init()
511 ptr[ptr_idx].mask = match->list[list_idx].mask; in guc_capture_list_init()
515 xe_gt_dbg(guc_to_gt(guc), "Got short capture reglist init: %d out-of %d.\n", in guc_capture_list_init()
528 match = guc_capture_get_one_list(guc->capture->reglists, owner, type, capture_class); in guc_cap_list_num_regs()
530 num_regs = match->num_regs; in guc_cap_list_num_regs()
532 match = guc_capture_get_one_list(guc->capture->extlists, owner, type, capture_class); in guc_cap_list_num_regs()
534 num_regs += match->num_regs; in guc_cap_list_num_regs()
538 * not yet got the hw-config, which is before max_mmio_per_node in guc_cap_list_num_regs()
539 * is initialized, then provide a worst-case number for in guc_cap_list_num_regs()
540 * extlists based on max dss fuse bits, but only ever for in guc_cap_list_num_regs()
541 * render/compute in guc_cap_list_num_regs()
546 !guc->capture->max_mmio_per_node) in guc_cap_list_num_regs()
558 struct xe_guc_state_capture *gc = guc->capture; in guc_capture_getlistsize()
566 cache = &gc->ads_cache[owner][type][capture_class]; in guc_capture_getlistsize()
567 if (!gc->reglists) { in guc_capture_getlistsize()
569 return -ENODEV; in guc_capture_getlistsize()
572 if (cache->is_valid) { in guc_capture_getlistsize()
573 *size = cache->size; in guc_capture_getlistsize()
574 return cache->status; in guc_capture_getlistsize()
578 !guc_capture_get_one_list(gc->reglists, owner, type, capture_class)) { in guc_capture_getlistsize()
585 return -ENODEV; in guc_capture_getlistsize()
591 return -ENODATA; in guc_capture_getlistsize()
601 * xe_guc_capture_getlistsize - Get list size for owner/type/class combination
621 * xe_guc_capture_getlist - Get register capture list for owner/type/class
638 struct xe_guc_state_capture *gc = guc->capture; in xe_guc_capture_getlist()
639 struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][capture_class]; in xe_guc_capture_getlist()
645 if (!gc->reglists) in xe_guc_capture_getlist()
646 return -ENODEV; in xe_guc_capture_getlist()
648 if (cache->is_valid) { in xe_guc_capture_getlist()
649 *outptr = cache->ptr; in xe_guc_capture_getlist()
650 return cache->status; in xe_guc_capture_getlist()
655 cache->is_valid = true; in xe_guc_capture_getlist()
656 cache->ptr = NULL; in xe_guc_capture_getlist()
657 cache->size = 0; in xe_guc_capture_getlist()
658 cache->status = ret; in xe_guc_capture_getlist()
664 return -ENOMEM; in xe_guc_capture_getlist()
670 listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs); in xe_guc_capture_getlist()
678 cache->is_valid = true; in xe_guc_capture_getlist()
679 cache->ptr = caplist; in xe_guc_capture_getlist()
680 cache->size = size; in xe_guc_capture_getlist()
681 cache->status = 0; in xe_guc_capture_getlist()
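A hedged usage sketch of these two entry points as the ADS-population code is expected to call them; the parameter order and the owner/class enumerators are assumptions based on identifiers visible in this file:

	size_t size = 0;
	void *regset;
	int ret;

	/* 1) how much ADS space does the PF render/compute class list need? */
	ret = xe_guc_capture_getlistsize(guc, GUC_CAPTURE_LIST_INDEX_PF,
					 GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
					 GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE,
					 &size);
	if (ret)
		return ret;	/* -ENODEV/-ENODATA: nothing to register */

	/* 2) fetch the cached, GuC-formatted list to copy into the ADS */
	ret = xe_guc_capture_getlist(guc, GUC_CAPTURE_LIST_INDEX_PF,
				     GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
				     GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE,
				     &regset);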
689 * xe_guc_capture_getnullheader - Get a null list for register capture
701 struct xe_guc_state_capture *gc = guc->capture; in xe_guc_capture_getnullheader()
705 if (gc->ads_null_cache) { in xe_guc_capture_getnullheader()
706 *outptr = gc->ads_null_cache; in xe_guc_capture_getnullheader()
713 return -ENOMEM; in xe_guc_capture_getnullheader()
715 gc->ads_null_cache = null_header; in xe_guc_capture_getnullheader()
723 * xe_guc_capture_ads_input_worst_size - Calculate the worst size for GuC register capture
738 * first of a two-phase GuC (and ADS-population) initialization in xe_guc_capture_ads_input_worst_size()
739 * sequence, that is, during the pre-hwconfig phase before we have in xe_guc_capture_ads_input_worst_size()
774 if (!guc->capture) in guc_capture_output_size_est()
775 return -ENODEV; in guc_capture_output_size_est()
778 * If every single engine-instance suffered a failure in quick succession but in guc_capture_output_size_est()
779 * were all unrelated, then a burst of multiple error-capture events would dump in guc_capture_output_size_est()
781 * would even dump the global-registers repeatedly. in guc_capture_output_size_est()
791 capture_class = xe_engine_class_to_guc_capture_class(hwe->class); in guc_capture_output_size_est()
810 * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
819 u32 buffer_size = xe_guc_log_section_size_capture(&guc->log); in check_guc_capture_size()
828 * G2H event capture-notification, search for: in check_guc_capture_size()
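Spelled out, the sizing rule this check applies (the variable names are assumptions; the 3x headroom comes from the comment above):

/*
 * min_size   = guc_capture_output_size_est(guc);  worst case: one capture
 *                                                 set per engine instance
 * spare_size = min_size * 3;                      headroom for back-to-back
 *                                                 capture bursts
 *
 * buffer_size < min_size    -> error: captures are guaranteed to be lost
 * buffer_size < spare_size  -> warn:  a burst of resets may overflow
 * otherwise                 -> OK
 */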
848 list_add(&node->link, list); in guc_capture_add_node_to_list()
856 guc_capture_add_node_to_list(node, &gc->outlist); in guc_capture_add_node_to_outlist()
863 guc_capture_add_node_to_list(node, &gc->cachelist); in guc_capture_add_node_to_cachelist()
871 n->locked = 0; in guc_capture_free_outlist_node()
872 list_del(&n->link); in guc_capture_free_outlist_node()
883 int guc_id = node->guc_id; in guc_capture_remove_stale_matches_from_list()
885 list_for_each_entry_safe(n, ntmp, &gc->outlist, link) { in guc_capture_remove_stale_matches_from_list()
886 if (n != node && !n->locked && n->guc_id == guc_id) in guc_capture_remove_stale_matches_from_list()
898 tmp[i] = node->reginfo[i].regs; in guc_capture_init_node()
900 guc->capture->max_mmio_per_node); in guc_capture_init_node()
904 node->reginfo[i].regs = tmp[i]; in guc_capture_init_node()
906 INIT_LIST_HEAD(&node->link); in guc_capture_init_node()
910 * DOC: Init, G2H-event and reporting flows for GuC-error-capture
913 * --------------------
914 * --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
917 * where n = 1 for global-reg-list +
918 * num_engine_classes for class-reg-list +
919 * num_engine_classes for instance-reg-list
920 * (since all instances of the same engine-class type
921 * have an identical engine-instance register-list).
924 * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
925 * Size = #define CAPTURE_BUFFER_SIZE (warns if on too-small)
929 * --------------------------
930 * --> G2H STATE_CAPTURE_NOTIFICATION
931 * L--> xe_guc_capture_process
932 * L--> Loop through B (head..tail) and for each engine instance's
933 * err-state-captured register-list we find, we alloc 'C':
934 * --> alloc C: A capture-output-node structure that includes misc capture info along
935 * with 3 register list dumps (global, engine-class and engine-instance)
936 * This node is created from a pre-allocated list of blank nodes in
937 * guc->capture->cachelist and populated with the error-capture
938 * data from GuC and then it's added into guc->capture->outlist linked
942 * GUC --> notify context reset:
943 * -----------------------------
944 * --> guc_exec_queue_timedout_job
945 * L--> xe_devcoredump
946 * L--> devcoredump_snapshot
947 * --> xe_hw_engine_snapshot_capture
948 * --> xe_engine_manual_capture(For manual capture)
951 * --------------------
952 * --> xe_devcoredump_read->
953 * L--> xxx_snapshot_print
954 * L--> xe_engine_snapshot_print
956 * guc->capture->outlist
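A condensed sketch of the node hand-off described by these three flows (helper names appear elsewhere in this file; control flow is simplified and error handling omitted):

	struct __guc_capture_parsed_output *node;

	/* G2H flow: take a blank node from guc->capture->cachelist (or
	 * recycle the oldest unlocked outlist entry), fill it from the
	 * GuC log ring, then publish it for a later devcoredump match-up */
	node = guc_capture_get_prealloc_node(guc);
	/* ... populate node->reginfo[] from the captured register dumps ... */
	guc_capture_add_node_to_outlist(guc->capture, node);

	/* devcoredump flow: find and pin the node matching the hung queue
	 * so it survives until the coredump has been read out */
	node = xe_guc_capture_get_matching_and_lock(q);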
962 if (buf->wr >= buf->rd) in guc_capture_buf_cnt()
963 return (buf->wr - buf->rd); in guc_capture_buf_cnt()
964 return (buf->size - buf->rd) + buf->wr; in guc_capture_buf_cnt()
969 if (buf->rd > buf->wr) in guc_capture_buf_cnt_to_end()
970 return (buf->size - buf->rd); in guc_capture_buf_cnt_to_end()
971 return (buf->wr - buf->rd); in guc_capture_buf_cnt_to_end()
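A worked example with hypothetical pointer values:

/*
 * With buf->size = 0x1000, buf->rd = 0xF00 and buf->wr = 0x100 the write
 * pointer has wrapped, so:
 *   guc_capture_buf_cnt()        = (0x1000 - 0xF00) + 0x100 = 0x200 bytes
 *   guc_capture_buf_cnt_to_end() =  0x1000 - 0xF00          = 0x100 bytes
 * i.e. 0x200 bytes are pending in total, but only 0x100 of them are
 * contiguous before the read pointer has to wrap back to offset 0.
 */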
975 * GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
977 * The GuC Log buffer region for error-capture is managed like a ring buffer.
978 * The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
979 * Additionally, as per the current and foreseeable future, all packed error-
983 * than one dword but the tail end of the err-capture buffer-region has lesser space left,
988 * function would typically do a straight-up memcpy from the ring contents and will only
989 * call this helper if their structure-extraction is straddling across the end of the
990 * ring. GuC firmware does not add any padding. The reason for the no-padding is to ease
1006 return -1; in guc_capture_log_remove_bytes()
1008 while (bytes_needed > 0 && tries--) { in guc_capture_log_remove_bytes()
1016 if (!buf->rd) in guc_capture_log_remove_bytes()
1018 buf->rd = 0; in guc_capture_log_remove_bytes()
1023 copy_size = avail < bytes_needed ? avail - misaligned : bytes_needed; in guc_capture_log_remove_bytes()
1024 xe_map_memcpy_from(guc_to_xe(guc), out + fill_size, &guc->log.bo->vmap, in guc_capture_log_remove_bytes()
1025 buf->data_offset + buf->rd, copy_size); in guc_capture_log_remove_bytes()
1026 buf->rd += copy_size; in guc_capture_log_remove_bytes()
1028 bytes_needed -= copy_size; in guc_capture_log_remove_bytes()
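Following the explanation above, a hypothetical pass through this helper when a read straddles the ring end:

/*
 * Say 8 bytes are needed but only 4 contiguous bytes remain before the
 * end of the ring: the first loop pass copies those 4 bytes into 'out'
 * and advances buf->rd to the end; the next pass sees nothing contiguous
 * left, wraps buf->rd back to 0 and continues; a further pass copies the
 * remaining 4 bytes from the start of the ring.  GuC never pads, so any
 * caller whose structure may straddle the wrap point must go through
 * this helper rather than a single xe_map_memcpy_from().
 */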
1045 return -1; in guc_capture_log_get_group_hdr()
1056 return -1; in guc_capture_log_get_data_hdr()
1067 return -1; in guc_capture_log_get_register()
1076 if (!list_empty(&guc->capture->cachelist)) { in guc_capture_get_prealloc_node()
1080 list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) { in guc_capture_get_prealloc_node()
1091 list_for_each_entry_safe_reverse(n, ntmp, &guc->capture->outlist, link) { in guc_capture_get_prealloc_node()
1092 if (!n->locked) in guc_capture_get_prealloc_node()
1097 list_del(&found->link); in guc_capture_get_prealloc_node()
1117 new->is_partial = original->is_partial; in guc_capture_clone_node()
1119 /* copy reg-lists that we want to clone */ in guc_capture_clone_node()
1122 XE_WARN_ON(original->reginfo[i].num_regs > in guc_capture_clone_node()
1123 guc->capture->max_mmio_per_node); in guc_capture_clone_node()
1125 memcpy(new->reginfo[i].regs, original->reginfo[i].regs, in guc_capture_clone_node()
1126 original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg)); in guc_capture_clone_node()
1128 new->reginfo[i].num_regs = original->reginfo[i].num_regs; in guc_capture_clone_node()
1129 new->reginfo[i].vfid = original->reginfo[i].vfid; in guc_capture_clone_node()
1132 new->eng_class = original->eng_class; in guc_capture_clone_node()
1134 new->eng_inst = original->eng_inst; in guc_capture_clone_node()
1135 new->guc_id = original->guc_id; in guc_capture_clone_node()
1136 new->lrca = original->lrca; in guc_capture_clone_node()
1159 return -ENODATA; in guc_capture_extract_reglists()
1162 xe_gt_warn(gt, "Got mis-aligned register capture entries\n"); in guc_capture_extract_reglists()
1163 ret = -EIO; in guc_capture_extract_reglists()
1169 ret = -EIO; in guc_capture_extract_reglists()
1180 * | | - num_captures = 5 | | in guc_capture_extract_reglists()
1187 * | | | - reg1, reg2, ... rega | | | in guc_capture_extract_reglists()
1192 * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| | in guc_capture_extract_reglists()
1195 * | | | - reg1, reg2, ... regb | | | in guc_capture_extract_reglists()
1203 * | | | - reg1, reg2, ... regc | | | in guc_capture_extract_reglists()
1208 * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| | in guc_capture_extract_reglists()
1211 * | | | - reg1, reg2, ... regd | | | in guc_capture_extract_reglists()
1219 * | | | - reg1, reg2, ... rege | | | in guc_capture_extract_reglists()
1227 while (numlists--) { in guc_capture_extract_reglists()
1229 ret = -EIO; in guc_capture_extract_reglists()
1235 /* unknown capture type - skip over to next capture set */ in guc_capture_extract_reglists()
1238 while (numregs--) { in guc_capture_extract_reglists()
1240 ret = -EIO; in guc_capture_extract_reglists()
1249 * linked list for match-up when xe_devcoredump calls later in guc_capture_extract_reglists()
1256 guc_capture_add_node_to_outlist(guc->capture, node); in guc_capture_extract_reglists()
1259 node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS].num_regs) { in guc_capture_extract_reglists()
1261 guc_capture_add_node_to_outlist(guc->capture, node); in guc_capture_extract_reglists()
1265 node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE].num_regs) { in guc_capture_extract_reglists()
1267 guc_capture_add_node_to_outlist(guc->capture, node); in guc_capture_extract_reglists()
1277 ret = -ENOMEM; in guc_capture_extract_reglists()
1284 node->is_partial = is_partial; in guc_capture_extract_reglists()
1285 node->reginfo[datatype].vfid = FIELD_GET(GUC_STATE_CAPTURE_HEADER_VFID, hdr.owner); in guc_capture_extract_reglists()
1286 node->source = XE_ENGINE_CAPTURE_SOURCE_GUC; in guc_capture_extract_reglists()
1287 node->type = datatype; in guc_capture_extract_reglists()
1291 node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS, in guc_capture_extract_reglists()
1293 node->eng_inst = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE, in guc_capture_extract_reglists()
1295 node->lrca = hdr.lrca; in guc_capture_extract_reglists()
1296 node->guc_id = hdr.guc_id; in guc_capture_extract_reglists()
1299 node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS, in guc_capture_extract_reglists()
1308 if (numregs > guc->capture->max_mmio_per_node) { in guc_capture_extract_reglists()
1310 numregs = guc->capture->max_mmio_per_node; in guc_capture_extract_reglists()
1312 node->reginfo[datatype].num_regs = numregs; in guc_capture_extract_reglists()
1313 regs = node->reginfo[datatype].regs; in guc_capture_extract_reglists()
1315 while (numregs--) { in guc_capture_extract_reglists()
1317 ret = -EIO; in guc_capture_extract_reglists()
1325 /* If we have data, add to linked list for match-up when xe_devcoredump calls */ in guc_capture_extract_reglists()
1327 if (node->reginfo[i].regs) { in guc_capture_extract_reglists()
1328 guc_capture_add_node_to_outlist(guc->capture, node); in guc_capture_extract_reglists()
1334 guc_capture_add_node_to_cachelist(guc->capture, node); in guc_capture_extract_reglists()
1346 return xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action)); in __guc_capture_flushlog_complete()
1361 src_data_offset = xe_guc_get_log_buffer_offset(&guc->log, GUC_LOG_BUFFER_CAPTURE); in __guc_capture_process_output()
1368 xe_map_memcpy_from(guc_to_xe(guc), &log_buf_state_local, &guc->log.bo->vmap, in __guc_capture_process_output()
1371 buffer_size = xe_guc_get_log_buffer_size(&guc->log, GUC_LOG_BUFFER_CAPTURE); in __guc_capture_process_output()
1378 guc->log.stats[GUC_LOG_BUFFER_CAPTURE].flush += tmp; in __guc_capture_process_output()
1379 new_overflow = xe_guc_check_log_buf_overflow(&guc->log, GUC_LOG_BUFFER_CAPTURE, in __guc_capture_process_output()
1405 if (ret && ret != -ENODATA) in __guc_capture_process_output()
1410 /* Update the state of log buffer err-cap state */ in __guc_capture_process_output()
1411 xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap, in __guc_capture_process_output()
1421 xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap, in __guc_capture_process_output()
1428 * xe_guc_capture_process - Process GuC register captured data
1439 if (guc->capture) in xe_guc_capture_process()
1455 new->reginfo[i].regs = drmm_kzalloc(drm, guc->capture->max_mmio_per_node * in guc_capture_alloc_one_node()
1457 if (!new->reginfo[i].regs) { in guc_capture_alloc_one_node()
1459 drmm_kfree(drm, new->reginfo[--i].regs); in guc_capture_alloc_one_node()
1478 xe_gt_warn(guc_to_gt(guc), "Register capture pre-alloc-cache failure\n"); in __guc_capture_create_prealloc_nodes()
1482 guc_capture_add_node_to_cachelist(guc->capture, node); in __guc_capture_create_prealloc_nodes()
1500 match = guc_capture_get_one_list(guc->capture->reglists, i, j, k); in guc_get_max_reglist_count()
1502 tmp = match->num_regs; in guc_get_max_reglist_count()
1504 match = guc_capture_get_one_list(guc->capture->extlists, i, j, k); in guc_get_max_reglist_count()
1506 tmp += match->num_regs; in guc_get_max_reglist_count()
1522 /* skip if we've already done the pre-alloc */ in guc_capture_create_prealloc_nodes()
1523 if (guc->capture->max_mmio_per_node) in guc_capture_create_prealloc_nodes()
1526 guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc); in guc_capture_create_prealloc_nodes()
1536 if (!list || !list->list || list->num_regs == 0) in read_reg_to_node()
1542 for (i = 0; i < list->num_regs; i++) { in read_reg_to_node()
1543 struct __guc_mmio_reg_descr desc = list->list[i]; in read_reg_to_node()
1546 if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) { in read_reg_to_node()
1549 if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS && in read_reg_to_node()
1555 value = xe_gt_mcr_unicast_read(hwe->gt, XE_REG_MCR(desc.reg.addr), in read_reg_to_node()
1558 value = xe_mmio_read32(&hwe->gt->mmio, desc.reg); in read_reg_to_node()
1570 * xe_engine_manual_capture - Take a manual engine snapshot from engine.
1581 struct xe_gt *gt = hwe->gt; in xe_engine_manual_capture()
1583 struct xe_guc *guc = &gt->uc.guc; in xe_engine_manual_capture()
1584 struct xe_devcoredump *devcoredump = &xe->devcoredump; in xe_engine_manual_capture()
1599 capture_class = xe_engine_class_to_guc_capture_class(hwe->class); in xe_engine_manual_capture()
1601 struct gcap_reg_list_info *reginfo = &new->reginfo[type]; in xe_engine_manual_capture()
1603 * regsinfo->regs is allocated based on guc->capture->max_mmio_per_node in xe_engine_manual_capture()
1613 hwe->name); in xe_engine_manual_capture()
1617 read_reg_to_node(hwe, list, reginfo->regs); in xe_engine_manual_capture()
1618 reginfo->num_regs = list->num_regs; in xe_engine_manual_capture()
1625 read_reg_to_node(hwe, list, &reginfo->regs[reginfo->num_regs]); in xe_engine_manual_capture()
1626 reginfo->num_regs += list->num_regs; in xe_engine_manual_capture()
1631 if (devcoredump && devcoredump->captured) { in xe_engine_manual_capture()
1632 struct xe_guc_submit_exec_queue_snapshot *ge = devcoredump->snapshot.ge; in xe_engine_manual_capture()
1635 guc_id = ge->guc.id; in xe_engine_manual_capture()
1636 if (ge->lrc[0]) in xe_engine_manual_capture()
1637 lrca = ge->lrc[0]->context_desc; in xe_engine_manual_capture()
1641 new->eng_class = xe_engine_class_to_guc_class(hwe->class); in xe_engine_manual_capture()
1642 new->eng_inst = hwe->instance; in xe_engine_manual_capture()
1643 new->guc_id = guc_id; in xe_engine_manual_capture()
1644 new->lrca = lrca; in xe_engine_manual_capture()
1645 new->is_partial = 0; in xe_engine_manual_capture()
1646 new->locked = 1; in xe_engine_manual_capture()
1647 new->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL; in xe_engine_manual_capture()
1649 guc_capture_add_node_to_outlist(guc->capture, new); in xe_engine_manual_capture()
1650 devcoredump->snapshot.matched_node = new; in xe_engine_manual_capture()
1658 if (reginfo && reginfo->num_regs > 0) { in guc_capture_find_reg()
1659 struct guc_mmio_reg *regs = reginfo->regs; in guc_capture_find_reg()
1662 for (i = 0; i < reginfo->num_regs; i++) in guc_capture_find_reg()
1674 struct xe_gt *gt = snapshot->hwe->gt; in snapshot_print_by_list_order()
1676 struct xe_devcoredump *devcoredump = &xe->devcoredump; in snapshot_print_by_list_order()
1677 struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot; in snapshot_print_by_list_order()
1682 if (!list || !list->list || list->num_regs == 0) in snapshot_print_by_list_order()
1684 XE_WARN_ON(!devcore_snapshot->matched_node); in snapshot_print_by_list_order()
1686 reginfo = &devcore_snapshot->matched_node->reginfo[type]; in snapshot_print_by_list_order()
1692 * table-of-lists in snapshot_print_by_list_order()
1694 for (i = 0; i < list->num_regs; i++) { in snapshot_print_by_list_order()
1695 const struct __guc_mmio_reg_descr *reg_desc = &list->list[i]; in snapshot_print_by_list_order()
1699 reg = guc_capture_find_reg(reginfo, reg_desc->reg.addr, reg_desc->flags); in snapshot_print_by_list_order()
1703 value = reg->value; in snapshot_print_by_list_order()
1704 switch (reg_desc->data_type) { in snapshot_print_by_list_order()
1738 drm_printf(p, "\t%s: 0x%016llx\n", reg_desc->regname, value_qw); in snapshot_print_by_list_order()
1751 if (FIELD_GET(GUC_REGSET_STEERING_NEEDED, reg_desc->flags)) in snapshot_print_by_list_order()
1752 drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, in snapshot_print_by_list_order()
1753 reg_desc->dss_id, value); in snapshot_print_by_list_order()
1755 drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value); in snapshot_print_by_list_order()
1765 * } // <- Register list end in snapshot_print_by_list_order()
1771 * xe_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
1780 "full-capture", in xe_engine_snapshot_print()
1781 "partial-capture" in xe_engine_snapshot_print()
1795 gt = snapshot->hwe->gt; in xe_engine_snapshot_print()
1797 devcoredump = &xe->devcoredump; in xe_engine_snapshot_print()
1798 devcore_snapshot = &devcoredump->snapshot; in xe_engine_snapshot_print()
1800 if (!devcore_snapshot->matched_node) in xe_engine_snapshot_print()
1803 xe_gt_assert(gt, snapshot->hwe); in xe_engine_snapshot_print()
1805 capture_class = xe_engine_class_to_guc_capture_class(snapshot->hwe->class); in xe_engine_snapshot_print()
1808 snapshot->name ? snapshot->name : "", in xe_engine_snapshot_print()
1809 snapshot->logical_instance); in xe_engine_snapshot_print()
1811 devcore_snapshot->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC ? in xe_engine_snapshot_print()
1813 drm_printf(p, "\tCoverage: %s\n", grptype[devcore_snapshot->matched_node->is_partial]); in xe_engine_snapshot_print()
1815 snapshot->forcewake.domain, snapshot->forcewake.ref); in xe_engine_snapshot_print()
1817 str_yes_no(snapshot->kernel_reserved)); in xe_engine_snapshot_print()
1837 * xe_guc_capture_get_matching_and_lock - Matching GuC capture for the queue.
1844 * Returns: found guc-capture node ptr else NULL
1855 if (!q || !q->gt) in xe_guc_capture_get_matching_and_lock()
1858 xe = gt_to_xe(q->gt); in xe_guc_capture_get_matching_and_lock()
1859 if (xe->wedged.mode >= 2 || !xe_device_uc_enabled(xe) || IS_SRIOV_VF(xe)) in xe_guc_capture_get_matching_and_lock()
1862 ss = &xe->devcoredump.snapshot; in xe_guc_capture_get_matching_and_lock()
1863 if (ss->matched_node && ss->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC) in xe_guc_capture_get_matching_and_lock()
1864 return ss->matched_node; in xe_guc_capture_get_matching_and_lock()
1867 for_each_hw_engine(hwe, q->gt, id) { in xe_guc_capture_get_matching_and_lock()
1868 if (hwe != q->hwe) in xe_guc_capture_get_matching_and_lock()
1870 guc_class = xe_engine_class_to_guc_class(hwe->class); in xe_guc_capture_get_matching_and_lock()
1876 struct xe_guc *guc = &q->gt->uc.guc; in xe_guc_capture_get_matching_and_lock()
1877 u16 guc_id = q->guc->id; in xe_guc_capture_get_matching_and_lock()
1878 u32 lrca = xe_lrc_ggtt_addr(q->lrc[0]); in xe_guc_capture_get_matching_and_lock()
1882 * the internal output link-list based on engine, guc id and in xe_guc_capture_get_matching_and_lock()
1885 list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) { in xe_guc_capture_get_matching_and_lock()
1886 if (n->eng_class == guc_class && n->eng_inst == hwe->instance && in xe_guc_capture_get_matching_and_lock()
1887 n->guc_id == guc_id && n->lrca == lrca && in xe_guc_capture_get_matching_and_lock()
1888 n->source == XE_ENGINE_CAPTURE_SOURCE_GUC) { in xe_guc_capture_get_matching_and_lock()
1889 n->locked = 1; in xe_guc_capture_get_matching_and_lock()
1898 * xe_engine_snapshot_capture_for_queue - Take snapshot of associated engine
1908 struct xe_device *xe = gt_to_xe(q->gt); in xe_engine_snapshot_capture_for_queue()
1909 struct xe_devcoredump *coredump = &xe->devcoredump; in xe_engine_snapshot_capture_for_queue()
1912 u32 adj_logical_mask = q->logical_mask; in xe_engine_snapshot_capture_for_queue()
1917 for_each_hw_engine(hwe, q->gt, id) { in xe_engine_snapshot_capture_for_queue()
1918 if (hwe->class != q->hwe->class || in xe_engine_snapshot_capture_for_queue()
1919 !(BIT(hwe->logical_instance) & adj_logical_mask)) { in xe_engine_snapshot_capture_for_queue()
1920 coredump->snapshot.hwe[id] = NULL; in xe_engine_snapshot_capture_for_queue()
1924 if (!coredump->snapshot.hwe[id]) { in xe_engine_snapshot_capture_for_queue()
1925 coredump->snapshot.hwe[id] = in xe_engine_snapshot_capture_for_queue()
1932 struct xe_guc *guc = &q->gt->uc.guc; in xe_engine_snapshot_capture_for_queue()
1936 * GuC-err-capture node for this engine after in xe_engine_snapshot_capture_for_queue()
1941 guc_capture_free_outlist_node(guc->capture, in xe_engine_snapshot_capture_for_queue()
1942 coredump->snapshot.matched_node); in xe_engine_snapshot_capture_for_queue()
1943 coredump->snapshot.matched_node = new; in xe_engine_snapshot_capture_for_queue()
1952 * xe_guc_capture_put_matched_nodes - Cleanup matched nodes
1961 struct xe_devcoredump *devcoredump = &xe->devcoredump; in xe_guc_capture_put_matched_nodes()
1962 struct __guc_capture_parsed_output *n = devcoredump->snapshot.matched_node; in xe_guc_capture_put_matched_nodes()
1965 guc_capture_remove_stale_matches_from_list(guc->capture, n); in xe_guc_capture_put_matched_nodes()
1966 guc_capture_free_outlist_node(guc->capture, n); in xe_guc_capture_put_matched_nodes()
1967 devcoredump->snapshot.matched_node = NULL; in xe_guc_capture_put_matched_nodes()
1972 * xe_guc_capture_steered_list_init - Init steering register list
1975 * Init steering register list for GuC register capture, create pre-alloc node
1983 * the end of the pre-populated render list. in xe_guc_capture_steered_list_init()
1991 * xe_guc_capture_init - Init for GuC register capture
1997 * -ENOMEM if out of memory
2001 guc->capture = drmm_kzalloc(guc_to_drm(guc), sizeof(*guc->capture), GFP_KERNEL); in xe_guc_capture_init()
2002 if (!guc->capture) in xe_guc_capture_init()
2003 return -ENOMEM; in xe_guc_capture_init()
2005 guc->capture->reglists = guc_capture_get_device_reglist(guc_to_xe(guc)); in xe_guc_capture_init()
2007 INIT_LIST_HEAD(&guc->capture->outlist); in xe_guc_capture_init()
2008 INIT_LIST_HEAD(&guc->capture->cachelist); in xe_guc_capture_init()