Lines matching full:vdev — each match below lists the source line number, the matching code, and the enclosing function.

65 int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)  in ivpu_hw_btrs_irqs_clear_with_0_mtl()  argument
77 static void freq_ratios_init_mtl(struct ivpu_device *vdev) in freq_ratios_init_mtl() argument
79 struct ivpu_hw_info *hw = vdev->hw; in freq_ratios_init_mtl()
90 static void freq_ratios_init_lnl(struct ivpu_device *vdev) in freq_ratios_init_lnl() argument
92 struct ivpu_hw_info *hw = vdev->hw; in freq_ratios_init_lnl()
103 void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev) in ivpu_hw_btrs_freq_ratios_init() argument
105 struct ivpu_hw_info *hw = vdev->hw; in ivpu_hw_btrs_freq_ratios_init()
107 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_freq_ratios_init()
108 freq_ratios_init_mtl(vdev); in ivpu_hw_btrs_freq_ratios_init()
110 freq_ratios_init_lnl(vdev); in ivpu_hw_btrs_freq_ratios_init()
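
Most of the entry points matched in this file share one shape: the public ivpu_hw_btrs_* function checks ivpu_hw_btrs_gen(vdev) and forwards to a Meteor Lake (MTL) or Lunar Lake (LNL) helper. A minimal sketch of that dispatch, reconstructed from the matched lines above (the listing only shows lines containing vdev, so the rest of the body is omitted):

    /* Per-generation dispatch as suggested by the matches; not verbatim source. */
    void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev)
    {
            if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                    freq_ratios_init_mtl(vdev);     /* MTL buttress PLL ratios */
            else
                    freq_ratios_init_lnl(vdev);     /* LNL buttress PLL ratios */
    }

The same two-way split reappears below in info_init, wp_request, d0i3_drive, ip_reset, pll_freq_get and diagnose_failure.
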
132 static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config) in read_tile_config_fuse() argument
139 ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse); in read_tile_config_fuse()
145 ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", config); in read_tile_config_fuse()
150 ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n", in read_tile_config_fuse()
153 ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", BTRS_LNL_TILE_MAX_NUM); in read_tile_config_fuse()
159 static int info_init_mtl(struct ivpu_device *vdev) in info_init_mtl() argument
161 struct ivpu_hw_info *hw = vdev->hw; in info_init_mtl()
171 static int info_init_lnl(struct ivpu_device *vdev) in info_init_lnl() argument
173 struct ivpu_hw_info *hw = vdev->hw; in info_init_lnl()
177 ret = read_tile_config_fuse(vdev, &tile_fuse_config); in info_init_lnl()
188 int ivpu_hw_btrs_info_init(struct ivpu_device *vdev) in ivpu_hw_btrs_info_init() argument
190 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_info_init()
191 return info_init_mtl(vdev); in ivpu_hw_btrs_info_init()
193 return info_init_lnl(vdev); in ivpu_hw_btrs_info_init()
196 static int wp_request_sync(struct ivpu_device *vdev) in wp_request_sync() argument
198 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in wp_request_sync()
204 static int wait_for_status_ready(struct ivpu_device *vdev, bool enable) in wait_for_status_ready() argument
211 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in wait_for_status_ready()
226 static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp) in wp_request_mtl() argument
249 static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp) in wp_request_lnl() argument
273 static void wp_request(struct ivpu_device *vdev, struct wp_request *wp) in wp_request() argument
275 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in wp_request()
276 wp_request_mtl(vdev, wp); in wp_request()
278 wp_request_lnl(vdev, wp); in wp_request()
281 static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp) in wp_request_send() argument
285 ret = wp_request_sync(vdev); in wp_request_send()
287 ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret); in wp_request_send()
291 wp_request(vdev, wp); in wp_request_send()
293 ret = wp_request_sync(vdev); in wp_request_send()
295 ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret); in wp_request_send()
300 static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable) in prepare_wp_request() argument
302 struct ivpu_hw_info *hw = vdev->hw; in prepare_wp_request()
307 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { in prepare_wp_request()
320 if (enable && ivpu_is_simics(vdev)) in prepare_wp_request()
324 static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable) in wait_for_pll_lock() argument
328 if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL) in wait_for_pll_lock()
337 int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) in ivpu_hw_btrs_wp_drive() argument
343 ivpu_dbg(vdev, PM, "Skipping workpoint request\n"); in ivpu_hw_btrs_wp_drive()
347 prepare_wp_request(vdev, &wp, enable); in ivpu_hw_btrs_wp_drive()
349 ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n", in ivpu_hw_btrs_wp_drive()
352 ret = wp_request_send(vdev, &wp); in ivpu_hw_btrs_wp_drive()
354 ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret); in ivpu_hw_btrs_wp_drive()
358 ret = wait_for_pll_lock(vdev, enable); in ivpu_hw_btrs_wp_drive()
360 ivpu_err(vdev, "Timed out waiting for PLL lock\n"); in ivpu_hw_btrs_wp_drive()
364 ret = wait_for_status_ready(vdev, enable); in ivpu_hw_btrs_wp_drive()
366 ivpu_err(vdev, "Timed out waiting for NPU ready status\n"); in ivpu_hw_btrs_wp_drive()
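
The workpoint (PLL) request path matched in ivpu_hw_btrs_wp_drive() is a prepare / send / wait sequence. A hedged reconstruction from the matched lines (the early "Skipping workpoint request" branch and other lines that do not contain vdev are omitted):

    /* Rough reconstruction of ivpu_hw_btrs_wp_drive(); details not visible
     * in the listing are left out. */
    int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
    {
            struct wp_request wp;
            int ret;

            prepare_wp_request(vdev, &wp, enable);     /* ratios, config, epp, cdyn */

            ret = wp_request_send(vdev, &wp);          /* sync, write request, sync */
            if (ret) {
                    ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret);
                    return ret;
            }

            ret = wait_for_pll_lock(vdev, enable);     /* waits only on MTL */
            if (ret) {
                    ivpu_err(vdev, "Timed out waiting for PLL lock\n");
                    return ret;
            }

            ret = wait_for_status_ready(vdev, enable); /* NPU ready status */
            if (ret)
                    ivpu_err(vdev, "Timed out waiting for NPU ready status\n");

            return ret;
    }
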
373 static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable) in d0i3_drive_mtl() argument
380 ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); in d0i3_drive_mtl()
393 ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); in d0i3_drive_mtl()
398 static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable) in d0i3_drive_lnl() argument
405 ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); in d0i3_drive_lnl()
418 ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); in d0i3_drive_lnl()
425 static int d0i3_drive(struct ivpu_device *vdev, bool enable) in d0i3_drive() argument
427 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in d0i3_drive()
428 return d0i3_drive_mtl(vdev, enable); in d0i3_drive()
430 return d0i3_drive_lnl(vdev, enable); in d0i3_drive()
433 int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev) in ivpu_hw_btrs_d0i3_enable() argument
440 ret = d0i3_drive(vdev, true); in ivpu_hw_btrs_d0i3_enable()
442 ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret); in ivpu_hw_btrs_d0i3_enable()
449 int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev) in ivpu_hw_btrs_d0i3_disable() argument
456 ret = d0i3_drive(vdev, false); in ivpu_hw_btrs_d0i3_disable()
458 ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret); in ivpu_hw_btrs_d0i3_disable()
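
D0i3 entry and exit are matched as thin wrappers around d0i3_drive(), which again splits per generation. A minimal sketch assembled from the matched lines (workaround checks and anything else not containing vdev is omitted):

    /* Sketch only; reconstructed from the matches above. */
    static int d0i3_drive(struct ivpu_device *vdev, bool enable)
    {
            if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                    return d0i3_drive_mtl(vdev, enable);

            return d0i3_drive_lnl(vdev, enable);
    }

    int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev)
    {
            int ret = d0i3_drive(vdev, true);

            if (ret)
                    ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
            return ret;
    }

ivpu_hw_btrs_d0i3_disable() mirrors this with d0i3_drive(vdev, false) and the corresponding error message.
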
463 int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev) in ivpu_hw_btrs_wait_for_clock_res_own_ack() argument
465 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_wait_for_clock_res_own_ack()
468 if (ivpu_is_simics(vdev)) in ivpu_hw_btrs_wait_for_clock_res_own_ack()
474 void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev) in ivpu_hw_btrs_set_port_arbitration_weights_lnl() argument
480 static int ip_reset_mtl(struct ivpu_device *vdev) in ip_reset_mtl() argument
487 ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n"); in ip_reset_mtl()
497 ivpu_err(vdev, "Timed out waiting for RESET completion\n"); in ip_reset_mtl()
502 static int ip_reset_lnl(struct ivpu_device *vdev) in ip_reset_lnl() argument
507 ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev); in ip_reset_lnl()
511 ivpu_err(vdev, "Wait for *_TRIGGER timed out\n"); in ip_reset_lnl()
521 ivpu_err(vdev, "Timed out waiting for RESET completion\n"); in ip_reset_lnl()
526 int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev) in ivpu_hw_btrs_ip_reset() argument
531 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_ip_reset()
532 return ip_reset_mtl(vdev); in ivpu_hw_btrs_ip_reset()
534 return ip_reset_lnl(vdev); in ivpu_hw_btrs_ip_reset()
537 void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev) in ivpu_hw_btrs_profiling_freq_reg_set_lnl() argument
541 if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) in ivpu_hw_btrs_profiling_freq_reg_set_lnl()
549 void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev) in ivpu_hw_btrs_ats_print_lnl() argument
551 ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n", in ivpu_hw_btrs_ats_print_lnl()
555 void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev) in ivpu_hw_btrs_clock_relinquish_disable_lnl() argument
563 bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev) in ivpu_hw_btrs_is_idle() argument
570 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { in ivpu_hw_btrs_is_idle()
583 int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev) in ivpu_hw_btrs_wait_for_idle() argument
585 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_wait_for_idle()
592 bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq) in ivpu_hw_btrs_irq_handler_mtl() argument
601 ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", in ivpu_hw_btrs_irq_handler_mtl()
605 ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0)); in ivpu_hw_btrs_irq_handler_mtl()
613 ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx", in ivpu_hw_btrs_irq_handler_mtl()
632 ivpu_pm_trigger_recovery(vdev, "Buttress IRQ"); in ivpu_hw_btrs_irq_handler_mtl()
638 bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq) in ivpu_hw_btrs_irq_handler_lnl() argument
647 ivpu_dbg(vdev, IRQ, "Survivability IRQ\n"); in ivpu_hw_btrs_irq_handler_lnl()
648 if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT)) in ivpu_hw_btrs_irq_handler_lnl()
649 ivpu_err_ratelimited(vdev, "IRQ FIFO full\n"); in ivpu_hw_btrs_irq_handler_lnl()
653 ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ)); in ivpu_hw_btrs_irq_handler_lnl()
656 ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", in ivpu_hw_btrs_irq_handler_lnl()
664 ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG)); in ivpu_hw_btrs_irq_handler_lnl()
670 ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG)); in ivpu_hw_btrs_irq_handler_lnl()
676 ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x", in ivpu_hw_btrs_irq_handler_lnl()
684 ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x", in ivpu_hw_btrs_irq_handler_lnl()
695 ivpu_pm_trigger_recovery(vdev, "Buttress IRQ"); in ivpu_hw_btrs_irq_handler_lnl()
700 int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable) in ivpu_hw_btrs_dct_get_request() argument
707 ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd); in ivpu_hw_btrs_dct_get_request()
719 ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1); in ivpu_hw_btrs_dct_get_request()
724 void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent) in ivpu_hw_btrs_dct_set_status() argument
749 u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) in ivpu_hw_btrs_ratio_to_freq() argument
751 struct ivpu_hw_info *hw = vdev->hw; in ivpu_hw_btrs_ratio_to_freq()
753 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_ratio_to_freq()
759 static u32 pll_freq_get_mtl(struct ivpu_device *vdev) in pll_freq_get_mtl() argument
766 if (!ivpu_is_silicon(vdev)) in pll_freq_get_mtl()
769 return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config); in pll_freq_get_mtl()
772 static u32 pll_freq_get_lnl(struct ivpu_device *vdev) in pll_freq_get_lnl() argument
782 u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev) in ivpu_hw_btrs_pll_freq_get() argument
784 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_pll_freq_get()
785 return pll_freq_get_mtl(vdev); in ivpu_hw_btrs_pll_freq_get()
787 return pll_freq_get_lnl(vdev); in ivpu_hw_btrs_pll_freq_get()
790 u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev) in ivpu_hw_btrs_telemetry_offset_get() argument
792 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_telemetry_offset_get()
798 u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev) in ivpu_hw_btrs_telemetry_size_get() argument
800 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_telemetry_size_get()
806 u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev) in ivpu_hw_btrs_telemetry_enable_get() argument
808 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_telemetry_enable_get()
814 void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev) in ivpu_hw_btrs_global_int_disable() argument
816 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_global_int_disable()
822 void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev) in ivpu_hw_btrs_global_int_enable() argument
824 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_global_int_enable()
830 void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev) in ivpu_hw_btrs_irq_enable() argument
832 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { in ivpu_hw_btrs_irq_enable()
841 void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev) in ivpu_hw_btrs_irq_disable() argument
843 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { in ivpu_hw_btrs_irq_disable()
852 static void diagnose_failure_mtl(struct ivpu_device *vdev) in diagnose_failure_mtl() argument
857 ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0)); in diagnose_failure_mtl()
862 ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx", in diagnose_failure_mtl()
869 static void diagnose_failure_lnl(struct ivpu_device *vdev) in diagnose_failure_lnl() argument
874 ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", in diagnose_failure_lnl()
880 ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG)); in diagnose_failure_lnl()
883 ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG)); in diagnose_failure_lnl()
886 ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n", in diagnose_failure_lnl()
891 ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n", in diagnose_failure_lnl()
896 ivpu_err(vdev, "Survivability IRQ\n"); in diagnose_failure_lnl()
899 void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev) in ivpu_hw_btrs_diagnose_failure() argument
901 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) in ivpu_hw_btrs_diagnose_failure()
902 return diagnose_failure_mtl(vdev); in ivpu_hw_btrs_diagnose_failure()
904 return diagnose_failure_lnl(vdev); in ivpu_hw_btrs_diagnose_failure()