Lines Matching defs:dmc_id (definitions and uses of dmc_id; the identifiers point to the i915/xe display DMC code, intel_dmc.c)

299 	u8 dmc_id;
394 static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
396 return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
399 static bool has_dmc_id_fw(struct intel_display *display, enum intel_dmc_id dmc_id)
403 return dmc && dmc->dmc_info[dmc_id].payload;
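
The matches at 394-403 are, between them, almost the complete bodies of the two small predicates that gate every per-firmware-id operation below. A minimal reconstruction; the display_to_dmc() lookup is assumed, it is not part of the matches:

static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
{
	return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
}

static bool has_dmc_id_fw(struct intel_display *display, enum intel_dmc_id dmc_id)
{
	/* Assumed helper to get from the display struct to its DMC state. */
	struct intel_dmc *dmc = display_to_dmc(display);

	/* No parsed payload for this id means there is nothing to program. */
	return dmc && dmc->dmc_info[dmc_id].payload;
}
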
442 enum intel_dmc_id dmc_id)
450 if (!has_dmc_id_fw(display, dmc_id))
455 DMC_EVT_CTL(display, dmc_id, handler),
456 DMC_EVT_HTP(display, dmc_id, handler));
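
The matches at 442-456 are the dmc_id-dependent lines of the routine that walks every event-handler slot of one firmware id and disables it; the call at line 601 names it disable_all_event_handlers(). A sketch, with the per-handler helper and the loop bound (taken from the DMC_EVENT_HANDLER_COUNT_GEN12 usage at 524/534) assumed:

static void disable_all_event_handlers(struct intel_display *display,
				       enum intel_dmc_id dmc_id)
{
	int handler;

	if (!has_dmc_id_fw(display, dmc_id))
		return;

	/* Assumed: one EVT_CTL/EVT_HTP pair per handler slot. */
	for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
		disable_event_handler(display,
				      DMC_EVT_CTL(display, dmc_id, handler),
				      DMC_EVT_HTP(display, dmc_id, handler));
}
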
521 enum intel_dmc_id dmc_id, i915_reg_t reg)
524 u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0));
525 u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
531 enum intel_dmc_id dmc_id, i915_reg_t reg)
534 u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0));
535 u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
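
The fragments at 521-535 are two symmetrical range checks: a register belongs to a dmc_id's EVT_CTL (or EVT_HTP) block if its mmio offset lies between handler 0 and handler DMC_EVENT_HANDLER_COUNT_GEN12. One of them sketched out, with the final comparison assumed:

static bool is_dmc_evt_ctl_reg(struct intel_display *display,
			       enum intel_dmc_id dmc_id, i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0));
	u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id,
						   DMC_EVENT_HANDLER_COUNT_GEN12));

	/* Half-open range: handler index COUNT is one past the last slot. */
	return offset >= start && offset < end;
}

is_dmc_evt_htp_reg() at 531-535 is the same check against the DMC_EVT_HTP() register block.
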
541 enum intel_dmc_id dmc_id,
545 return is_dmc_evt_ctl_reg(display, dmc_id, reg) &&
550 enum intel_dmc_id dmc_id,
553 if (!is_dmc_evt_ctl_reg(display, dmc_id, reg))
557 if (dmc_id != DMC_FW_MAIN)
562 is_event_handler(display, dmc_id, MAINDMC_EVENT_CLK_MSEC, reg, data))
567 is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data))
575 enum intel_dmc_id dmc_id, int i)
577 if (disable_dmc_evt(display, dmc_id,
578 dmc->dmc_info[dmc_id].mmioaddr[i],
579 dmc->dmc_info[dmc_id].mmiodata[i]))
582 return dmc->dmc_info[dmc_id].mmiodata[i];
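
The matches at 575-582 show dmc_mmiodata() passing the firmware-provided mmiodata[i] through unless disable_dmc_evt() (550-567) decides that this EVT_CTL write would arm a handler the driver wants kept off (CLK_MSEC and VBLANK_A on the main firmware, per lines 562/567). A sketch; the value substituted for a suppressed handler is not visible in the matches, so it is represented by a hypothetical helper:

static u32 dmc_mmiodata(struct intel_display *display, struct intel_dmc *dmc,
			enum intel_dmc_id dmc_id, int i)
{
	if (disable_dmc_evt(display, dmc_id,
			    dmc->dmc_info[dmc_id].mmioaddr[i],
			    dmc->dmc_info[dmc_id].mmiodata[i]))
		/* Hypothetical stand-in for the real "handler off" EVT_CTL value. */
		return dmc_evt_ctl_disabled_value();

	return dmc->dmc_info[dmc_id].mmiodata[i];
}
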
585 static void dmc_load_mmio(struct intel_display *display, enum intel_dmc_id dmc_id)
590 for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
591 intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
592 dmc_mmiodata(display, dmc, dmc_id, i));
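
The matches at 585-592 are effectively the whole mmio pass: every mmioaddr[i]/mmiodata[i] pair parsed from the firmware header is written out, filtered through dmc_mmiodata() as above. Reconstruction (locals assumed):

static void dmc_load_mmio(struct intel_display *display, enum intel_dmc_id dmc_id)
{
	struct intel_dmc *dmc = display_to_dmc(display);
	int i;

	for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++)
		intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
			       dmc_mmiodata(display, dmc, dmc_id, i));
}
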
596 static void dmc_load_program(struct intel_display *display, enum intel_dmc_id dmc_id)
601 disable_all_event_handlers(display, dmc_id);
605 for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
607 DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
608 dmc->dmc_info[dmc_id].payload[i]);
613 dmc_load_mmio(display, dmc_id);
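
The matches at 596-613 outline the per-id load sequence: disable all event handlers first, copy the payload word by word into the DMC_PROGRAM() RAM starting at start_mmioaddr, then do the mmio pass. Sketch; which intel_de_write* flavour the copy loop uses, and any preemption/locking around it, is not visible in the matches:

static void dmc_load_program(struct intel_display *display, enum intel_dmc_id dmc_id)
{
	struct intel_dmc *dmc = display_to_dmc(display);
	int i;

	disable_all_event_handlers(display, dmc_id);

	for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++)
		intel_de_write(display,
			       DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
			       dmc->dmc_info[dmc_id].payload[i]);

	dmc_load_mmio(display, dmc_id);
}
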
617 enum intel_dmc_id dmc_id)
623 if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
626 found = intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, 0));
627 expected = dmc->dmc_info[dmc_id].payload[0];
631 dmc_id, expected, found);
633 for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
634 i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
637 expected = dmc_mmiodata(display, dmc, dmc_id, i);
640 if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) {
647 dmc_id, i, i915_mmio_reg_offset(reg), expected, found);
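
The matches at 617-647 sketch a read-back verification: bail out for an invalid or absent id, compare the first DMC_PROGRAM() word against payload[0], then walk the mmio table and compare each register, with EVT_CTL registers getting extra treatment around line 640. A condensed sketch that uses plain drm_WARN_ON() instead of the driver's actual diagnostics:

static void assert_dmc_loaded(struct intel_display *display, enum intel_dmc_id dmc_id)
{
	struct intel_dmc *dmc = display_to_dmc(display);
	u32 expected, found;
	int i;

	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
		return;

	found = intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, 0));
	expected = dmc->dmc_info[dmc_id].payload[0];
	drm_WARN_ON(display->drm, found != expected);

	for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
		i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];

		expected = dmc_mmiodata(display, dmc, dmc_id, i);
		found = intel_de_read(display, reg);

		/*
		 * Line 640 shows EVT_CTL registers being special-cased before
		 * the comparison; what exactly is done there is not visible in
		 * the matches and is left out of this sketch.
		 */
		drm_WARN_ON(display->drm, found != expected);
	}
}
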
728 enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
730 if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
739 dmc_load_program(display, dmc_id);
741 dmc_load_mmio(display, dmc_id);
743 assert_dmc_loaded(display, dmc_id);
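
The matches at 728-743 belong to a per-pipe path: the pipe is mapped to its firmware id with PIPE_TO_DMC_ID(), the usual validity/presence guard is applied, and then either the full program or only the mmio table is (re)written, followed by the read-back assert. The function name and the branch condition are not visible, so both are hypothetical here:

/* Hypothetical name and "full" parameter; only the body follows the matches. */
static void pipedmc_reload(struct intel_display *display, enum pipe pipe, bool full)
{
	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);

	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
		return;

	if (full)
		dmc_load_program(display, dmc_id);	/* line 739 */
	else
		dmc_load_mmio(display, dmc_id);		/* line 741 */

	assert_dmc_loaded(display, dmc_id);		/* line 743 */
}
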
763 enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
765 if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
782 enum intel_dmc_id dmc_id,
790 for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
791 i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
792 u32 data = dmc->dmc_info[dmc_id].mmiodata[i];
794 if (!is_event_handler(display, dmc_id, event_id, reg, data))
803 dmc_id, num_handlers, event_id);
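
The matches at 782-803 show dmc_configure_event() scanning the firmware's mmio table for the entries that implement a given event id, updating each one and counting how many it touched; line 803 then reports dmc_id, num_handlers and event_id, presumably warning when the count is not the expected one. A sketch with the actual register update elided, since it is not part of the matches:

static void dmc_configure_event(struct intel_display *display,
				enum intel_dmc_id dmc_id,
				unsigned int event_id, bool enable)
{
	struct intel_dmc *dmc = display_to_dmc(display);
	int num_handlers = 0;
	int i;

	for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
		i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
		u32 data = dmc->dmc_info[dmc_id].mmiodata[i];

		if (!is_event_handler(display, dmc_id, event_id, reg, data))
			continue;

		/* Arm or disarm the matching handler here (register write elided). */
		num_handlers++;
	}

	/* Diagnostic using dmc_id, num_handlers, event_id (line 803) goes here. */
}
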
836 enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
838 dmc_configure_event(display, dmc_id, PIPEDMC_EVENT_VBLANK, enable);
852 enum intel_dmc_id dmc_id;
861 for_each_dmc_id(dmc_id) {
862 dmc_load_program(display, dmc_id);
863 assert_dmc_loaded(display, dmc_id);
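
The matches at 861-863 reduce the whole-device load path to a loop over every firmware id, loading and then verifying each one; the lines before it (852-860) are not visible here:

	for_each_dmc_id(dmc_id) {
		dmc_load_program(display, dmc_id);
		assert_dmc_loaded(display, dmc_id);
	}

Ids without parsed firmware presumably fall through harmlessly, since their dmc_info entries carry zero dmc_fw_size and mmio_count.
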
889 enum intel_dmc_id dmc_id;
896 for_each_dmc_id(dmc_id)
897 disable_all_event_handlers(display, dmc_id);
930 enum intel_dmc_id dmc_id;
934 dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
936 if (!is_valid_dmc_id(dmc_id)) {
937 drm_dbg(display->drm, "Unsupported firmware id: %u\n", dmc_id);
945 if (dmc->dmc_info[dmc_id].present)
949 dmc->dmc_info[dmc_id].present = true;
950 dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
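
The matches at 930-950 show the per-entry validation while parsing the package header: version 1 packages only carry the main firmware, the id is checked with is_valid_dmc_id(), an already-recorded id is kept as-is (line 945), and otherwise the entry's offset is stored for the later per-id header parse. A sketch of the loop body; the enclosing loop and its bound are assumed:

	for (i = 0; i < num_entries; i++) {
		/* Package version <= 1 has no per-entry id: it is all main DMC. */
		dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;

		if (!is_valid_dmc_id(dmc_id)) {
			drm_dbg(display->drm, "Unsupported firmware id: %u\n", dmc_id);
			continue;
		}

		/* First entry for an id wins (line 945). */
		if (dmc->dmc_info[dmc_id].present)
			continue;

		dmc->dmc_info[dmc_id].present = true;
		dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
	}
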
957 int header_ver, enum intel_dmc_id dmc_id)
966 } else if (dmc_id == DMC_FW_MAIN) {
973 start_range = TGL_PIPE_MMIO_START(dmc_id);
974 end_range = TGL_PIPE_MMIO_END(dmc_id);
990 size_t rem_size, enum intel_dmc_id dmc_id)
993 struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
1058 dmc_header->header_ver, dmc_id)) {
1063 drm_dbg_kms(display->drm, "DMC %d:\n", dmc_id);
1070 is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
1071 is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
1072 disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
1193 enum intel_dmc_id dmc_id;
1216 for_each_dmc_id(dmc_id) {
1217 if (!dmc->dmc_info[dmc_id].present)
1220 offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
1227 parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
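
The matches at 1216-1227 show the top-level parser dispatching to the per-id header parser: for every id the package parse marked present, the absolute offset into the firmware image is readcount plus the recorded dmc_offset in dwords, and the header at that offset is handed to parse_dmc_fw_header(). In outline; the header pointer setup and any bounds checking are assumed:

	for_each_dmc_id(dmc_id) {
		if (!dmc->dmc_info[dmc_id].present)
			continue;

		/* dmc_offset is stored in dwords, hence the "* 4". */
		offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;

		dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
		parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
	}
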
1432 enum intel_dmc_id dmc_id;
1441 for_each_dmc_id(dmc_id)
1442 kfree(dmc->dmc_info[dmc_id].payload);
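
The matches at 1441-1442 are the release side of the payload allocations made during parsing. A minimal sketch of a cleanup helper around them; the name and the clearing of the pointer are assumptions:

/* Hypothetical wrapper name. */
static void dmc_free_payloads(struct intel_dmc *dmc)
{
	enum intel_dmc_id dmc_id;

	for_each_dmc_id(dmc_id) {
		kfree(dmc->dmc_info[dmc_id].payload);
		/* Assumed: clearing keeps has_dmc_id_fw() consistent afterwards. */
		dmc->dmc_info[dmc_id].payload = NULL;
	}
}
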
1659 enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
1661 dmc_configure_event(display, dmc_id, event, true);
1668 enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
1670 dmc_configure_event(display, dmc_id, event, false);
1677 enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
1679 return dmc ? dmc->dmc_info[dmc_id].start_mmioaddr : 0;
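
The matches at 1659-1679 are three thin per-crtc wrappers: two arm/disarm a pipe DMC event handler for the crtc's pipe, and one exposes the firmware's start_mmioaddr (0 when no firmware state exists). Sketched with hypothetical names and an assumed dmc lookup, since neither is part of the matches:

static void pipedmc_event_set(struct intel_display *display,
			      struct intel_crtc *crtc, u32 event, bool enable)
{
	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);

	dmc_configure_event(display, dmc_id, event, enable);	/* lines 1661/1670 */
}

static u32 pipedmc_start_mmioaddr(struct intel_display *display, struct intel_crtc *crtc)
{
	struct intel_dmc *dmc = display_to_dmc(display);
	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);

	return dmc ? dmc->dmc_info[dmc_id].start_mmioaddr : 0;	/* line 1679 */
}
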