Lines Matching full:hba (drivers/ufs/core/ufshcd.c)

113 static bool is_mcq_supported(struct ufs_hba *hba)  in is_mcq_supported()  argument
115 return hba->mcq_sup && use_mcq_mode; in is_mcq_supported()
155 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, in ufshcd_dump_regs() argument
173 regs[pos / 4] = ufshcd_readl(hba, offset + pos); in ufshcd_dump_regs()
261 static bool ufshcd_has_pending_tasks(struct ufs_hba *hba) in ufshcd_has_pending_tasks() argument
263 return hba->outstanding_tasks || hba->active_uic_cmd || in ufshcd_has_pending_tasks()
264 hba->uic_async_done; in ufshcd_has_pending_tasks()
267 static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba) in ufshcd_is_ufs_dev_busy() argument
269 return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba); in ufshcd_is_ufs_dev_busy()
300 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
302 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
304 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
305 static void ufshcd_hba_exit(struct ufs_hba *hba);
306 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params);
307 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
308 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
309 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
310 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
311 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
312 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
313 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
316 static int ufshcd_change_power_mode(struct ufs_hba *hba,
318 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
319 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
320 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
322 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
324 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
325 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
327 void ufshcd_enable_irq(struct ufs_hba *hba) in ufshcd_enable_irq() argument
329 if (!hba->is_irq_enabled) { in ufshcd_enable_irq()
330 enable_irq(hba->irq); in ufshcd_enable_irq()
331 hba->is_irq_enabled = true; in ufshcd_enable_irq()
336 void ufshcd_disable_irq(struct ufs_hba *hba) in ufshcd_disable_irq() argument
338 if (hba->is_irq_enabled) { in ufshcd_disable_irq()
339 disable_irq(hba->irq); in ufshcd_disable_irq()
340 hba->is_irq_enabled = false; in ufshcd_disable_irq()
345 static void ufshcd_configure_wb(struct ufs_hba *hba) in ufshcd_configure_wb() argument
347 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_configure_wb()
350 ufshcd_wb_toggle(hba, true); in ufshcd_configure_wb()
352 ufshcd_wb_toggle_buf_flush_during_h8(hba, true); in ufshcd_configure_wb()
354 if (ufshcd_is_wb_buf_flush_allowed(hba)) in ufshcd_configure_wb()
355 ufshcd_wb_toggle_buf_flush(hba, true); in ufshcd_configure_wb()
358 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_cmd_upiu_trace() argument
361 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; in ufshcd_add_cmd_upiu_trace()
370 header = &hba->lrb[tag].ucd_rsp_ptr->header; in ufshcd_add_cmd_upiu_trace()
372 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb, in ufshcd_add_cmd_upiu_trace()
376 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, in ufshcd_add_query_upiu_trace() argument
383 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header, in ufshcd_add_query_upiu_trace()
387 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_tm_upiu_trace() argument
390 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; in ufshcd_add_tm_upiu_trace()
396 trace_ufshcd_upiu(dev_name(hba->dev), str_t, in ufshcd_add_tm_upiu_trace()
401 trace_ufshcd_upiu(dev_name(hba->dev), str_t, in ufshcd_add_tm_upiu_trace()
407 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, in ufshcd_add_uic_command_trace() argument
419 cmd = ufshcd_readl(hba, REG_UIC_COMMAND); in ufshcd_add_uic_command_trace()
421 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd, in ufshcd_add_uic_command_trace()
422 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), in ufshcd_add_uic_command_trace()
423 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), in ufshcd_add_uic_command_trace()
424 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); in ufshcd_add_uic_command_trace()
427 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_command_trace() argument
435 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_add_command_trace()
444 ufshcd_add_cmd_upiu_trace(hba, tag, str_t); in ufshcd_add_command_trace()
467 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_add_command_trace()
469 if (hba->mcq_enabled) { in ufshcd_add_command_trace()
470 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq); in ufshcd_add_command_trace()
474 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_add_command_trace()
480 static void ufshcd_print_clk_freqs(struct ufs_hba *hba) in ufshcd_print_clk_freqs() argument
483 struct list_head *head = &hba->clk_list_head; in ufshcd_print_clk_freqs()
491 dev_err(hba->dev, "clk: %s, rate: %u\n", in ufshcd_print_clk_freqs()
496 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id, in ufshcd_print_evt() argument
506 e = &hba->ufs_stats.event[id]; in ufshcd_print_evt()
513 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, in ufshcd_print_evt()
519 dev_err(hba->dev, "No record of %s\n", err_name); in ufshcd_print_evt()
521 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt); in ufshcd_print_evt()
524 static void ufshcd_print_evt_hist(struct ufs_hba *hba) in ufshcd_print_evt_hist() argument
526 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); in ufshcd_print_evt_hist()
528 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err"); in ufshcd_print_evt_hist()
529 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err"); in ufshcd_print_evt_hist()
530 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err"); in ufshcd_print_evt_hist()
531 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err"); in ufshcd_print_evt_hist()
532 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err"); in ufshcd_print_evt_hist()
533 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR, in ufshcd_print_evt_hist()
535 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err"); in ufshcd_print_evt_hist()
536 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL, in ufshcd_print_evt_hist()
538 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail"); in ufshcd_print_evt_hist()
539 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR, in ufshcd_print_evt_hist()
541 ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail"); in ufshcd_print_evt_hist()
542 ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR, in ufshcd_print_evt_hist()
544 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset"); in ufshcd_print_evt_hist()
545 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset"); in ufshcd_print_evt_hist()
546 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort"); in ufshcd_print_evt_hist()
548 ufshcd_vops_dbg_register_dump(hba); in ufshcd_print_evt_hist()
552 void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt) in ufshcd_print_tr() argument
557 lrbp = &hba->lrb[tag]; in ufshcd_print_tr()
559 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", in ufshcd_print_tr()
561 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", in ufshcd_print_tr()
563 dev_err(hba->dev, in ufshcd_print_tr()
569 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, in ufshcd_print_tr()
573 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, in ufshcd_print_tr()
580 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_print_tr()
581 prdt_length /= ufshcd_sg_entry_size(hba); in ufshcd_print_tr()
583 dev_err(hba->dev, in ufshcd_print_tr()
590 ufshcd_sg_entry_size(hba) * prdt_length); in ufshcd_print_tr()
597 struct ufs_hba *hba = shost_priv(shost); in ufshcd_print_tr_iter() local
599 ufshcd_print_tr(hba, req->tag, *(bool *)priv); in ufshcd_print_tr_iter()
606 * @hba: per-adapter instance.
609 static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt) in ufshcd_print_trs_all() argument
611 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt); in ufshcd_print_trs_all()
614 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) in ufshcd_print_tmrs() argument
618 for_each_set_bit(tag, &bitmap, hba->nutmrs) { in ufshcd_print_tmrs()
619 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; in ufshcd_print_tmrs()
621 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); in ufshcd_print_tmrs()
626 static void ufshcd_print_host_state(struct ufs_hba *hba) in ufshcd_print_host_state() argument
628 const struct scsi_device *sdev_ufs = hba->ufs_device_wlun; in ufshcd_print_host_state()
630 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); in ufshcd_print_host_state()
631 dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n", in ufshcd_print_host_state()
632 scsi_host_busy(hba->host), hba->outstanding_tasks); in ufshcd_print_host_state()
633 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", in ufshcd_print_host_state()
634 hba->saved_err, hba->saved_uic_err); in ufshcd_print_host_state()
635 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", in ufshcd_print_host_state()
636 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_print_host_state()
637 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", in ufshcd_print_host_state()
638 hba->pm_op_in_progress, hba->is_sys_suspended); in ufshcd_print_host_state()
639 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", in ufshcd_print_host_state()
640 hba->auto_bkops_enabled, hba->host->host_self_blocked); in ufshcd_print_host_state()
641 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); in ufshcd_print_host_state()
642 dev_err(hba->dev, in ufshcd_print_host_state()
644 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000), in ufshcd_print_host_state()
645 hba->ufs_stats.hibern8_exit_cnt); in ufshcd_print_host_state()
646 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", in ufshcd_print_host_state()
647 div_u64(hba->ufs_stats.last_intr_ts, 1000), in ufshcd_print_host_state()
648 hba->ufs_stats.last_intr_status); in ufshcd_print_host_state()
649 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", in ufshcd_print_host_state()
650 hba->eh_flags, hba->req_abort_count); in ufshcd_print_host_state()
651 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", in ufshcd_print_host_state()
652 hba->ufs_version, hba->capabilities, hba->caps); in ufshcd_print_host_state()
653 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, in ufshcd_print_host_state()
654 hba->dev_quirks); in ufshcd_print_host_state()
656 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n", in ufshcd_print_host_state()
659 ufshcd_print_clk_freqs(hba); in ufshcd_print_host_state()
663 * ufshcd_print_pwr_info - print power params as saved in hba
665 * @hba: per-adapter instance
667 static void ufshcd_print_pwr_info(struct ufs_hba *hba) in ufshcd_print_pwr_info() argument
684 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", in ufshcd_print_pwr_info()
686 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, in ufshcd_print_pwr_info()
687 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, in ufshcd_print_pwr_info()
688 names[hba->pwr_info.pwr_rx], in ufshcd_print_pwr_info()
689 names[hba->pwr_info.pwr_tx], in ufshcd_print_pwr_info()
690 hba->pwr_info.hs_rate); in ufshcd_print_pwr_info()
693 static void ufshcd_device_reset(struct ufs_hba *hba) in ufshcd_device_reset() argument
697 err = ufshcd_vops_device_reset(hba); in ufshcd_device_reset()
700 ufshcd_set_ufs_dev_active(hba); in ufshcd_device_reset()
701 if (ufshcd_is_wb_allowed(hba)) { in ufshcd_device_reset()
702 hba->dev_info.wb_enabled = false; in ufshcd_device_reset()
703 hba->dev_info.wb_buf_flush_enabled = false; in ufshcd_device_reset()
705 if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE) in ufshcd_device_reset()
706 hba->dev_info.rtc_time_baseline = 0; in ufshcd_device_reset()
709 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); in ufshcd_device_reset()
726 * @hba: per-adapter interface
735 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, in ufshcd_wait_for_register() argument
744 interval_us, timeout_ms * 1000, false, hba, reg); in ufshcd_wait_for_register()
749 * @hba: Pointer to adapter instance
753 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) in ufshcd_get_intr_mask() argument
755 if (hba->ufs_version <= ufshci_version(2, 0)) in ufshcd_get_intr_mask()
762 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
763 * @hba: Pointer to adapter instance
767 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) in ufshcd_get_ufs_version() argument
771 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) in ufshcd_get_ufs_version()
772 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba); in ufshcd_get_ufs_version()
774 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION); in ufshcd_get_ufs_version()
790 * @hba: pointer to adapter instance
794 static inline bool ufshcd_is_device_present(struct ufs_hba *hba) in ufshcd_is_device_present() argument
796 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT; in ufshcd_is_device_present()
819 * @hba: per adapter instance
822 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask) in ufshcd_utrl_clear() argument
824 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utrl_clear()
839 ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR); in ufshcd_utrl_clear()
844 * @hba: per adapter instance
847 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) in ufshcd_utmrl_clear() argument
849 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utmrl_clear()
850 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_utmrl_clear()
852 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_utmrl_clear()
868 * @hba: Pointer to adapter instance
874 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) in ufshcd_get_uic_cmd_result() argument
876 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & in ufshcd_get_uic_cmd_result()
882 * @hba: Pointer to adapter instance
888 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) in ufshcd_get_dme_attr_val() argument
890 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); in ufshcd_get_dme_attr_val()
921 * @hba: per adapter instance
924 ufshcd_reset_intr_aggr(struct ufs_hba *hba) in ufshcd_reset_intr_aggr() argument
926 ufshcd_writel(hba, INT_AGGR_ENABLE | in ufshcd_reset_intr_aggr()
933 * @hba: per adapter instance
938 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) in ufshcd_config_intr_aggr() argument
940 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | in ufshcd_config_intr_aggr()
948 * @hba: per adapter instance
950 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) in ufshcd_disable_intr_aggr() argument
952 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); in ufshcd_disable_intr_aggr()
959 * @hba: per adapter instance
961 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) in ufshcd_enable_run_stop_reg() argument
963 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
965 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
971 * @hba: per adapter instance
973 static inline void ufshcd_hba_start(struct ufs_hba *hba) in ufshcd_hba_start() argument
977 if (ufshcd_crypto_enable(hba)) in ufshcd_hba_start()
980 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); in ufshcd_hba_start()
985 * @hba: per adapter instance
989 bool ufshcd_is_hba_active(struct ufs_hba *hba) in ufshcd_is_hba_active() argument
991 return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE; in ufshcd_is_hba_active()
997 * @hba: per adapter instance
999 void ufshcd_pm_qos_init(struct ufs_hba *hba) in ufshcd_pm_qos_init() argument
1002 if (hba->pm_qos_enabled) in ufshcd_pm_qos_init()
1005 cpu_latency_qos_add_request(&hba->pm_qos_req, PM_QOS_DEFAULT_VALUE); in ufshcd_pm_qos_init()
1007 if (cpu_latency_qos_request_active(&hba->pm_qos_req)) in ufshcd_pm_qos_init()
1008 hba->pm_qos_enabled = true; in ufshcd_pm_qos_init()
1013 * @hba: per adapter instance
1015 void ufshcd_pm_qos_exit(struct ufs_hba *hba) in ufshcd_pm_qos_exit() argument
1017 if (!hba->pm_qos_enabled) in ufshcd_pm_qos_exit()
1020 cpu_latency_qos_remove_request(&hba->pm_qos_req); in ufshcd_pm_qos_exit()
1021 hba->pm_qos_enabled = false; in ufshcd_pm_qos_exit()
1026 * @hba: per adapter instance
1029 static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on) in ufshcd_pm_qos_update() argument
1031 if (!hba->pm_qos_enabled) in ufshcd_pm_qos_update()
1034 cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE); in ufshcd_pm_qos_update()
1039 * @hba: per adapter instance
1044 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up) in ufshcd_set_clk_freq() argument
1048 struct list_head *head = &hba->clk_list_head; in ufshcd_set_clk_freq()
1061 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
1066 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
1079 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
1084 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
1091 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, in ufshcd_set_clk_freq()
1103 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_opp_config_clks() local
1104 struct list_head *head = &hba->clk_list_head; in ufshcd_opp_config_clks()
1127 clki->name, hba->clk_scaling.target_freq, freq); in ufshcd_opp_config_clks()
1135 static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq) in ufshcd_opp_set_rate() argument
1140 opp = dev_pm_opp_find_freq_floor_indexed(hba->dev, in ufshcd_opp_set_rate()
1145 ret = dev_pm_opp_set_opp(hba->dev, opp); in ufshcd_opp_set_rate()
1153 * @hba: per adapter instance
1159 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, in ufshcd_scale_clks() argument
1165 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); in ufshcd_scale_clks()
1169 if (hba->use_pm_opp) in ufshcd_scale_clks()
1170 ret = ufshcd_opp_set_rate(hba, freq); in ufshcd_scale_clks()
1172 ret = ufshcd_set_clk_freq(hba, scale_up); in ufshcd_scale_clks()
1176 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); in ufshcd_scale_clks()
1178 if (hba->use_pm_opp) in ufshcd_scale_clks()
1179 ufshcd_opp_set_rate(hba, in ufshcd_scale_clks()
1180 hba->devfreq->previous_freq); in ufshcd_scale_clks()
1182 ufshcd_set_clk_freq(hba, !scale_up); in ufshcd_scale_clks()
1186 ufshcd_pm_qos_update(hba, scale_up); in ufshcd_scale_clks()
1189 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_scale_clks()
1197 * @hba: per adapter instance
1203 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, in ufshcd_is_devfreq_scaling_required() argument
1207 struct list_head *head = &hba->clk_list_head; in ufshcd_is_devfreq_scaling_required()
1212 if (hba->use_pm_opp) in ufshcd_is_devfreq_scaling_required()
1213 return freq != hba->clk_scaling.target_freq; in ufshcd_is_devfreq_scaling_required()
1239 static u32 ufshcd_pending_cmds(struct ufs_hba *hba) in ufshcd_pending_cmds() argument
1245 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_pending_cmds()
1246 __shost_for_each_device(sdev, hba->host) in ufshcd_pending_cmds()
1248 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_pending_cmds()
1259 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, in ufshcd_wait_for_doorbell_clr() argument
1268 ufshcd_hold(hba); in ufshcd_wait_for_doorbell_clr()
1275 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { in ufshcd_wait_for_doorbell_clr()
1280 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_wait_for_doorbell_clr()
1281 tr_pending = ufshcd_pending_cmds(hba); in ufshcd_wait_for_doorbell_clr()
1303 dev_err(hba->dev, in ufshcd_wait_for_doorbell_clr()
1309 ufshcd_release(hba); in ufshcd_wait_for_doorbell_clr()
1315 * @hba: per adapter instance
1321 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) in ufshcd_scale_gear() argument
1327 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info, in ufshcd_scale_gear()
1330 memcpy(&new_pwr_info, &hba->pwr_info, in ufshcd_scale_gear()
1333 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear || in ufshcd_scale_gear()
1334 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) { in ufshcd_scale_gear()
1336 memcpy(&hba->clk_scaling.saved_pwr_info, in ufshcd_scale_gear()
1337 &hba->pwr_info, in ufshcd_scale_gear()
1341 new_pwr_info.gear_tx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1342 new_pwr_info.gear_rx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1347 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info); in ufshcd_scale_gear()
1349 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", in ufshcd_scale_gear()
1351 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx, in ufshcd_scale_gear()
1363 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) in ufshcd_clock_scaling_prepare() argument
1370 blk_mq_quiesce_tagset(&hba->host->tag_set); in ufshcd_clock_scaling_prepare()
1371 mutex_lock(&hba->wb_mutex); in ufshcd_clock_scaling_prepare()
1372 down_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1374 if (!hba->clk_scaling.is_allowed || in ufshcd_clock_scaling_prepare()
1375 ufshcd_wait_for_doorbell_clr(hba, timeout_us)) { in ufshcd_clock_scaling_prepare()
1377 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1378 mutex_unlock(&hba->wb_mutex); in ufshcd_clock_scaling_prepare()
1379 blk_mq_unquiesce_tagset(&hba->host->tag_set); in ufshcd_clock_scaling_prepare()
1384 ufshcd_hold(hba); in ufshcd_clock_scaling_prepare()
1390 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up) in ufshcd_clock_scaling_unprepare() argument
1392 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_unprepare()
1395 if (ufshcd_enable_wb_if_scaling_up(hba) && !err) in ufshcd_clock_scaling_unprepare()
1396 ufshcd_wb_toggle(hba, scale_up); in ufshcd_clock_scaling_unprepare()
1398 mutex_unlock(&hba->wb_mutex); in ufshcd_clock_scaling_unprepare()
1400 blk_mq_unquiesce_tagset(&hba->host->tag_set); in ufshcd_clock_scaling_unprepare()
1401 ufshcd_release(hba); in ufshcd_clock_scaling_unprepare()
1406 * @hba: per adapter instance
1413 static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq, in ufshcd_devfreq_scale() argument
1418 ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC); in ufshcd_devfreq_scale()
1424 ret = ufshcd_scale_gear(hba, false); in ufshcd_devfreq_scale()
1429 ret = ufshcd_scale_clks(hba, freq, scale_up); in ufshcd_devfreq_scale()
1432 ufshcd_scale_gear(hba, true); in ufshcd_devfreq_scale()
1438 ret = ufshcd_scale_gear(hba, true); in ufshcd_devfreq_scale()
1440 ufshcd_scale_clks(hba, hba->devfreq->previous_freq, in ufshcd_devfreq_scale()
1447 ufshcd_clock_scaling_unprepare(hba, ret, scale_up); in ufshcd_devfreq_scale()
1453 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_clk_scaling_suspend_work() local
1456 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) in ufshcd_clk_scaling_suspend_work()
1458 if (hba->clk_scaling.active_reqs || in ufshcd_clk_scaling_suspend_work()
1459 hba->clk_scaling.is_suspended) in ufshcd_clk_scaling_suspend_work()
1462 hba->clk_scaling.is_suspended = true; in ufshcd_clk_scaling_suspend_work()
1463 hba->clk_scaling.window_start_t = 0; in ufshcd_clk_scaling_suspend_work()
1466 devfreq_suspend_device(hba->devfreq); in ufshcd_clk_scaling_suspend_work()
1471 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_clk_scaling_resume_work() local
1474 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) in ufshcd_clk_scaling_resume_work()
1476 if (!hba->clk_scaling.is_suspended) in ufshcd_clk_scaling_resume_work()
1478 hba->clk_scaling.is_suspended = false; in ufshcd_clk_scaling_resume_work()
1481 devfreq_resume_device(hba->devfreq); in ufshcd_clk_scaling_resume_work()
1488 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_target() local
1491 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_target()
1494 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_devfreq_target()
1497 if (hba->use_pm_opp) { in ufshcd_devfreq_target()
1508 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, in ufshcd_devfreq_target()
1513 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) in ufshcd_devfreq_target()
1515 if (ufshcd_eh_in_progress(hba)) in ufshcd_devfreq_target()
1519 if (hba->clk_scaling.is_suspended) { in ufshcd_devfreq_target()
1520 dev_warn(hba->dev, "clock scaling is suspended, skip"); in ufshcd_devfreq_target()
1524 if (!hba->clk_scaling.active_reqs) in ufshcd_devfreq_target()
1531 if (hba->use_pm_opp) in ufshcd_devfreq_target()
1532 scale_up = *freq > hba->clk_scaling.target_freq; in ufshcd_devfreq_target()
1536 if (!hba->use_pm_opp && !scale_up) in ufshcd_devfreq_target()
1540 if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) { in ufshcd_devfreq_target()
1547 ret = ufshcd_devfreq_scale(hba, *freq, scale_up); in ufshcd_devfreq_target()
1549 hba->clk_scaling.target_freq = *freq; in ufshcd_devfreq_target()
1551 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_devfreq_target()
1557 (!scale_up || hba->clk_scaling.suspend_on_no_request)) in ufshcd_devfreq_target()
1558 queue_work(hba->clk_scaling.workq, in ufshcd_devfreq_target()
1559 &hba->clk_scaling.suspend_work); in ufshcd_devfreq_target()
1567 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_get_dev_status() local
1568 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_devfreq_get_dev_status()
1571 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_devfreq_get_dev_status()
1576 guard(spinlock_irqsave)(&hba->clk_scaling.lock); in ufshcd_devfreq_get_dev_status()
1587 if (hba->use_pm_opp) { in ufshcd_devfreq_get_dev_status()
1588 stat->current_frequency = hba->clk_scaling.target_freq; in ufshcd_devfreq_get_dev_status()
1590 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_get_dev_status()
1617 static int ufshcd_devfreq_init(struct ufs_hba *hba) in ufshcd_devfreq_init() argument
1619 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_init()
1628 if (!hba->use_pm_opp) { in ufshcd_devfreq_init()
1630 dev_pm_opp_add(hba->dev, clki->min_freq, 0); in ufshcd_devfreq_init()
1631 dev_pm_opp_add(hba->dev, clki->max_freq, 0); in ufshcd_devfreq_init()
1634 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1635 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1636 devfreq = devfreq_add_device(hba->dev, in ufshcd_devfreq_init()
1637 &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1639 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1642 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); in ufshcd_devfreq_init()
1644 if (!hba->use_pm_opp) { in ufshcd_devfreq_init()
1645 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_init()
1646 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_init()
1651 hba->devfreq = devfreq; in ufshcd_devfreq_init()
1656 static void ufshcd_devfreq_remove(struct ufs_hba *hba) in ufshcd_devfreq_remove() argument
1658 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_remove()
1660 if (!hba->devfreq) in ufshcd_devfreq_remove()
1663 devfreq_remove_device(hba->devfreq); in ufshcd_devfreq_remove()
1664 hba->devfreq = NULL; in ufshcd_devfreq_remove()
1666 if (!hba->use_pm_opp) { in ufshcd_devfreq_remove()
1670 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_remove()
1671 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_remove()
1675 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) in ufshcd_suspend_clkscaling() argument
1679 cancel_work_sync(&hba->clk_scaling.suspend_work); in ufshcd_suspend_clkscaling()
1680 cancel_work_sync(&hba->clk_scaling.resume_work); in ufshcd_suspend_clkscaling()
1682 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) in ufshcd_suspend_clkscaling()
1684 if (!hba->clk_scaling.is_suspended) { in ufshcd_suspend_clkscaling()
1686 hba->clk_scaling.is_suspended = true; in ufshcd_suspend_clkscaling()
1687 hba->clk_scaling.window_start_t = 0; in ufshcd_suspend_clkscaling()
1692 devfreq_suspend_device(hba->devfreq); in ufshcd_suspend_clkscaling()
1695 static void ufshcd_resume_clkscaling(struct ufs_hba *hba) in ufshcd_resume_clkscaling() argument
1699 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock) in ufshcd_resume_clkscaling()
1701 if (hba->clk_scaling.is_suspended) { in ufshcd_resume_clkscaling()
1703 hba->clk_scaling.is_suspended = false; in ufshcd_resume_clkscaling()
1708 devfreq_resume_device(hba->devfreq); in ufshcd_resume_clkscaling()
1714 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkscale_enable_show() local
1716 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled); in ufshcd_clkscale_enable_show()
1722 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkscale_enable_store() local
1729 down(&hba->host_sem); in ufshcd_clkscale_enable_store()
1730 if (!ufshcd_is_user_access_allowed(hba)) { in ufshcd_clkscale_enable_store()
1736 if (value == hba->clk_scaling.is_enabled) in ufshcd_clkscale_enable_store()
1739 ufshcd_rpm_get_sync(hba); in ufshcd_clkscale_enable_store()
1740 ufshcd_hold(hba); in ufshcd_clkscale_enable_store()
1742 hba->clk_scaling.is_enabled = value; in ufshcd_clkscale_enable_store()
1745 ufshcd_resume_clkscaling(hba); in ufshcd_clkscale_enable_store()
1747 ufshcd_suspend_clkscaling(hba); in ufshcd_clkscale_enable_store()
1748 err = ufshcd_devfreq_scale(hba, ULONG_MAX, true); in ufshcd_clkscale_enable_store()
1750 dev_err(hba->dev, "%s: failed to scale clocks up %d\n", in ufshcd_clkscale_enable_store()
1754 ufshcd_release(hba); in ufshcd_clkscale_enable_store()
1755 ufshcd_rpm_put_sync(hba); in ufshcd_clkscale_enable_store()
1757 up(&hba->host_sem); in ufshcd_clkscale_enable_store()
1761 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba) in ufshcd_init_clk_scaling_sysfs() argument
1763 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; in ufshcd_init_clk_scaling_sysfs()
1764 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; in ufshcd_init_clk_scaling_sysfs()
1765 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); in ufshcd_init_clk_scaling_sysfs()
1766 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; in ufshcd_init_clk_scaling_sysfs()
1767 hba->clk_scaling.enable_attr.attr.mode = 0644; in ufshcd_init_clk_scaling_sysfs()
1768 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) in ufshcd_init_clk_scaling_sysfs()
1769 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); in ufshcd_init_clk_scaling_sysfs()
1772 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) in ufshcd_remove_clk_scaling_sysfs() argument
1774 if (hba->clk_scaling.enable_attr.attr.name) in ufshcd_remove_clk_scaling_sysfs()
1775 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); in ufshcd_remove_clk_scaling_sysfs()
1778 static void ufshcd_init_clk_scaling(struct ufs_hba *hba) in ufshcd_init_clk_scaling() argument
1780 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_init_clk_scaling()
1783 if (!hba->clk_scaling.min_gear) in ufshcd_init_clk_scaling()
1784 hba->clk_scaling.min_gear = UFS_HS_G1; in ufshcd_init_clk_scaling()
1786 INIT_WORK(&hba->clk_scaling.suspend_work, in ufshcd_init_clk_scaling()
1788 INIT_WORK(&hba->clk_scaling.resume_work, in ufshcd_init_clk_scaling()
1791 spin_lock_init(&hba->clk_scaling.lock); in ufshcd_init_clk_scaling()
1793 hba->clk_scaling.workq = alloc_ordered_workqueue( in ufshcd_init_clk_scaling()
1794 "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no); in ufshcd_init_clk_scaling()
1796 hba->clk_scaling.is_initialized = true; in ufshcd_init_clk_scaling()
1799 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) in ufshcd_exit_clk_scaling() argument
1801 if (!hba->clk_scaling.is_initialized) in ufshcd_exit_clk_scaling()
1804 ufshcd_remove_clk_scaling_sysfs(hba); in ufshcd_exit_clk_scaling()
1805 destroy_workqueue(hba->clk_scaling.workq); in ufshcd_exit_clk_scaling()
1806 ufshcd_devfreq_remove(hba); in ufshcd_exit_clk_scaling()
1807 hba->clk_scaling.is_initialized = false; in ufshcd_exit_clk_scaling()
1813 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_ungate_work() local
1816 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_ungate_work()
1818 scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) { in ufshcd_ungate_work()
1819 if (hba->clk_gating.state == CLKS_ON) in ufshcd_ungate_work()
1823 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_ungate_work()
1824 ufshcd_setup_clocks(hba, true); in ufshcd_ungate_work()
1826 ufshcd_enable_irq(hba); in ufshcd_ungate_work()
1829 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_ungate_work()
1831 hba->clk_gating.is_suspended = true; in ufshcd_ungate_work()
1832 if (ufshcd_is_link_hibern8(hba)) { in ufshcd_ungate_work()
1833 ret = ufshcd_uic_hibern8_exit(hba); in ufshcd_ungate_work()
1835 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufshcd_ungate_work()
1838 ufshcd_set_link_active(hba); in ufshcd_ungate_work()
1840 hba->clk_gating.is_suspended = false; in ufshcd_ungate_work()
1847 * @hba: per adapter instance
1849 void ufshcd_hold(struct ufs_hba *hba) in ufshcd_hold() argument
1854 if (!ufshcd_is_clkgating_allowed(hba) || in ufshcd_hold()
1855 !hba->clk_gating.is_initialized) in ufshcd_hold()
1857 spin_lock_irqsave(&hba->clk_gating.lock, flags); in ufshcd_hold()
1858 hba->clk_gating.active_reqs++; in ufshcd_hold()
1861 switch (hba->clk_gating.state) { in ufshcd_hold()
1871 if (ufshcd_can_hibern8_during_gating(hba) && in ufshcd_hold()
1872 ufshcd_is_link_hibern8(hba)) { in ufshcd_hold()
1873 spin_unlock_irqrestore(&hba->clk_gating.lock, flags); in ufshcd_hold()
1874 flush_result = flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1875 if (hba->clk_gating.is_suspended && !flush_result) in ufshcd_hold()
1877 spin_lock_irqsave(&hba->clk_gating.lock, flags); in ufshcd_hold()
1882 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { in ufshcd_hold()
1883 hba->clk_gating.state = CLKS_ON; in ufshcd_hold()
1884 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1885 hba->clk_gating.state); in ufshcd_hold()
1895 hba->clk_gating.state = REQ_CLKS_ON; in ufshcd_hold()
1896 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1897 hba->clk_gating.state); in ufshcd_hold()
1898 queue_work(hba->clk_gating.clk_gating_workq, in ufshcd_hold()
1899 &hba->clk_gating.ungate_work); in ufshcd_hold()
1906 spin_unlock_irqrestore(&hba->clk_gating.lock, flags); in ufshcd_hold()
1907 flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1909 spin_lock_irqsave(&hba->clk_gating.lock, flags); in ufshcd_hold()
1912 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", in ufshcd_hold()
1913 __func__, hba->clk_gating.state); in ufshcd_hold()
1916 spin_unlock_irqrestore(&hba->clk_gating.lock, flags); in ufshcd_hold()
1922 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_gate_work() local
1926 scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) { in ufshcd_gate_work()
1933 if (hba->clk_gating.is_suspended || in ufshcd_gate_work()
1934 hba->clk_gating.state != REQ_CLKS_OFF) { in ufshcd_gate_work()
1935 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1936 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1937 hba->clk_gating.state); in ufshcd_gate_work()
1941 if (hba->clk_gating.active_reqs) in ufshcd_gate_work()
1945 scoped_guard(spinlock_irqsave, hba->host->host_lock) { in ufshcd_gate_work()
1946 if (ufshcd_is_ufs_dev_busy(hba) || in ufshcd_gate_work()
1947 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) in ufshcd_gate_work()
1952 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_gate_work()
1953 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_gate_work()
1955 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1956 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_gate_work()
1958 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1959 hba->clk_gating.state); in ufshcd_gate_work()
1962 ufshcd_set_link_hibern8(hba); in ufshcd_gate_work()
1965 ufshcd_disable_irq(hba); in ufshcd_gate_work()
1967 ufshcd_setup_clocks(hba, false); in ufshcd_gate_work()
1970 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_gate_work()
1980 guard(spinlock_irqsave)(&hba->clk_gating.lock); in ufshcd_gate_work()
1981 if (hba->clk_gating.state == REQ_CLKS_OFF) { in ufshcd_gate_work()
1982 hba->clk_gating.state = CLKS_OFF; in ufshcd_gate_work()
1983 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1984 hba->clk_gating.state); in ufshcd_gate_work()
1988 static void __ufshcd_release(struct ufs_hba *hba) in __ufshcd_release() argument
1990 lockdep_assert_held(&hba->clk_gating.lock); in __ufshcd_release()
1992 if (!ufshcd_is_clkgating_allowed(hba)) in __ufshcd_release()
1995 hba->clk_gating.active_reqs--; in __ufshcd_release()
1997 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || in __ufshcd_release()
1998 !hba->clk_gating.is_initialized || in __ufshcd_release()
1999 hba->clk_gating.state == CLKS_OFF) in __ufshcd_release()
2002 scoped_guard(spinlock_irqsave, hba->host->host_lock) { in __ufshcd_release()
2003 if (ufshcd_has_pending_tasks(hba) || in __ufshcd_release()
2004 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) in __ufshcd_release()
2008 hba->clk_gating.state = REQ_CLKS_OFF; in __ufshcd_release()
2009 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); in __ufshcd_release()
2010 queue_delayed_work(hba->clk_gating.clk_gating_workq, in __ufshcd_release()
2011 &hba->clk_gating.gate_work, in __ufshcd_release()
2012 msecs_to_jiffies(hba->clk_gating.delay_ms)); in __ufshcd_release()
2015 void ufshcd_release(struct ufs_hba *hba) in ufshcd_release() argument
2017 guard(spinlock_irqsave)(&hba->clk_gating.lock); in ufshcd_release()
2018 __ufshcd_release(hba); in ufshcd_release()
2025 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_show() local
2027 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms); in ufshcd_clkgate_delay_show()
2032 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_set() local
2034 guard(spinlock_irqsave)(&hba->clk_gating.lock); in ufshcd_clkgate_delay_set()
2035 hba->clk_gating.delay_ms = value; in ufshcd_clkgate_delay_set()
2054 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_enable_show() local
2056 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled); in ufshcd_clkgate_enable_show()
2062 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_enable_store() local
2070 guard(spinlock_irqsave)(&hba->clk_gating.lock); in ufshcd_clkgate_enable_store()
2072 if (value == hba->clk_gating.is_enabled) in ufshcd_clkgate_enable_store()
2076 __ufshcd_release(hba); in ufshcd_clkgate_enable_store()
2078 hba->clk_gating.active_reqs++; in ufshcd_clkgate_enable_store()
2080 hba->clk_gating.is_enabled = value; in ufshcd_clkgate_enable_store()
2085 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba) in ufshcd_init_clk_gating_sysfs() argument
2087 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; in ufshcd_init_clk_gating_sysfs()
2088 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; in ufshcd_init_clk_gating_sysfs()
2089 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); in ufshcd_init_clk_gating_sysfs()
2090 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; in ufshcd_init_clk_gating_sysfs()
2091 hba->clk_gating.delay_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
2092 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) in ufshcd_init_clk_gating_sysfs()
2093 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); in ufshcd_init_clk_gating_sysfs()
2095 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; in ufshcd_init_clk_gating_sysfs()
2096 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; in ufshcd_init_clk_gating_sysfs()
2097 sysfs_attr_init(&hba->clk_gating.enable_attr.attr); in ufshcd_init_clk_gating_sysfs()
2098 hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; in ufshcd_init_clk_gating_sysfs()
2099 hba->clk_gating.enable_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
2100 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) in ufshcd_init_clk_gating_sysfs()
2101 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); in ufshcd_init_clk_gating_sysfs()
2104 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) in ufshcd_remove_clk_gating_sysfs() argument
2106 if (hba->clk_gating.delay_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
2107 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); in ufshcd_remove_clk_gating_sysfs()
2108 if (hba->clk_gating.enable_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
2109 device_remove_file(hba->dev, &hba->clk_gating.enable_attr); in ufshcd_remove_clk_gating_sysfs()
2112 static void ufshcd_init_clk_gating(struct ufs_hba *hba) in ufshcd_init_clk_gating() argument
2114 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_init_clk_gating()
2117 hba->clk_gating.state = CLKS_ON; in ufshcd_init_clk_gating()
2119 hba->clk_gating.delay_ms = 150; in ufshcd_init_clk_gating()
2120 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); in ufshcd_init_clk_gating()
2121 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); in ufshcd_init_clk_gating()
2123 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue( in ufshcd_init_clk_gating()
2125 hba->host->host_no); in ufshcd_init_clk_gating()
2127 ufshcd_init_clk_gating_sysfs(hba); in ufshcd_init_clk_gating()
2129 hba->clk_gating.is_enabled = true; in ufshcd_init_clk_gating()
2130 hba->clk_gating.is_initialized = true; in ufshcd_init_clk_gating()
2133 static void ufshcd_exit_clk_gating(struct ufs_hba *hba) in ufshcd_exit_clk_gating() argument
2135 if (!hba->clk_gating.is_initialized) in ufshcd_exit_clk_gating()
2138 ufshcd_remove_clk_gating_sysfs(hba); in ufshcd_exit_clk_gating()
2141 ufshcd_hold(hba); in ufshcd_exit_clk_gating()
2142 hba->clk_gating.is_initialized = false; in ufshcd_exit_clk_gating()
2143 ufshcd_release(hba); in ufshcd_exit_clk_gating()
2145 destroy_workqueue(hba->clk_gating.clk_gating_workq); in ufshcd_exit_clk_gating()
2148 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_start_busy() argument
2153 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_clk_scaling_start_busy()
2156 guard(spinlock_irqsave)(&hba->clk_scaling.lock); in ufshcd_clk_scaling_start_busy()
2158 if (!hba->clk_scaling.active_reqs++) in ufshcd_clk_scaling_start_busy()
2161 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) in ufshcd_clk_scaling_start_busy()
2165 queue_work(hba->clk_scaling.workq, in ufshcd_clk_scaling_start_busy()
2166 &hba->clk_scaling.resume_work); in ufshcd_clk_scaling_start_busy()
2168 if (!hba->clk_scaling.window_start_t) { in ufshcd_clk_scaling_start_busy()
2169 hba->clk_scaling.window_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
2170 hba->clk_scaling.tot_busy_t = 0; in ufshcd_clk_scaling_start_busy()
2171 hba->clk_scaling.is_busy_started = false; in ufshcd_clk_scaling_start_busy()
2174 if (!hba->clk_scaling.is_busy_started) { in ufshcd_clk_scaling_start_busy()
2175 hba->clk_scaling.busy_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
2176 hba->clk_scaling.is_busy_started = true; in ufshcd_clk_scaling_start_busy()
2180 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_update_busy() argument
2182 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_clk_scaling_update_busy()
2184 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_clk_scaling_update_busy()
2187 guard(spinlock_irqsave)(&hba->clk_scaling.lock); in ufshcd_clk_scaling_update_busy()
2189 hba->clk_scaling.active_reqs--; in ufshcd_clk_scaling_update_busy()
2208 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, in ufshcd_should_inform_monitor() argument
2211 const struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_should_inform_monitor()
2215 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); in ufshcd_should_inform_monitor()
2218 static void ufshcd_start_monitor(struct ufs_hba *hba, in ufshcd_start_monitor() argument
2224 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_start_monitor()
2225 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) in ufshcd_start_monitor()
2226 hba->monitor.busy_start_ts[dir] = ktime_get(); in ufshcd_start_monitor()
2227 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_start_monitor()
2230 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp) in ufshcd_update_monitor() argument
2235 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_update_monitor()
2236 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { in ufshcd_update_monitor()
2238 struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_update_monitor()
2259 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_update_monitor()
2264 * @hba: per adapter instance
2269 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag, in ufshcd_send_command() argument
2272 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; in ufshcd_send_command()
2279 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); in ufshcd_send_command()
2281 ufshcd_clk_scaling_start_busy(hba); in ufshcd_send_command()
2282 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) in ufshcd_send_command()
2283 ufshcd_start_monitor(hba, lrbp); in ufshcd_send_command()
2285 if (hba->mcq_enabled) { in ufshcd_send_command()
2296 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_send_command()
2297 if (hba->vops && hba->vops->setup_xfer_req) in ufshcd_send_command()
2298 hba->vops->setup_xfer_req(hba, lrbp->task_tag, in ufshcd_send_command()
2300 __set_bit(lrbp->task_tag, &hba->outstanding_reqs); in ufshcd_send_command()
2301 ufshcd_writel(hba, 1 << lrbp->task_tag, in ufshcd_send_command()
2303 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_send_command()
2332 * @hba: per adapter instance
2338 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_copy_query_response() argument
2340 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_copy_query_response()
2345 if (hba->dev_cmd.query.descriptor && in ufshcd_copy_query_response()
2356 hba->dev_cmd.query.request.upiu_req.length); in ufshcd_copy_query_response()
2358 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); in ufshcd_copy_query_response()
2360 dev_warn(hba->dev, in ufshcd_copy_query_response()
2372 * @hba: per adapter instance
2376 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) in ufshcd_hba_capabilities() argument
2380 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); in ufshcd_hba_capabilities()
2383 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1; in ufshcd_hba_capabilities()
2384 hba->nutmrs = in ufshcd_hba_capabilities()
2385 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; in ufshcd_hba_capabilities()
2386 hba->reserved_slot = hba->nutrs - 1; in ufshcd_hba_capabilities()
2388 hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1; in ufshcd_hba_capabilities()
2391 err = ufshcd_hba_init_crypto_capabilities(hba); in ufshcd_hba_capabilities()
2393 dev_err(hba->dev, "crypto setup failed\n"); in ufshcd_hba_capabilities()
2402 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities); in ufshcd_hba_capabilities()
2407 if (!(hba->quirks & UFSHCD_QUIRK_BROKEN_LSDBS_CAP)) in ufshcd_hba_capabilities()
2408 hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities); in ufshcd_hba_capabilities()
2410 hba->lsdb_sup = true; in ufshcd_hba_capabilities()
2412 hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP); in ufshcd_hba_capabilities()
2420 * @hba: per adapter instance
2424 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) in ufshcd_ready_for_uic_cmd() argument
2428 500, uic_cmd_timeout * 1000, false, hba, in ufshcd_ready_for_uic_cmd()
2435 * @hba: Pointer to adapter instance
2441 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) in ufshcd_get_upmcrs() argument
2443 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; in ufshcd_get_upmcrs()
2448 * @hba: per adapter instance
2452 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_dispatch_uic_cmd() argument
2454 lockdep_assert_held(&hba->uic_cmd_mutex); in ufshcd_dispatch_uic_cmd()
2456 WARN_ON(hba->active_uic_cmd); in ufshcd_dispatch_uic_cmd()
2458 hba->active_uic_cmd = uic_cmd; in ufshcd_dispatch_uic_cmd()
2461 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); in ufshcd_dispatch_uic_cmd()
2462 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); in ufshcd_dispatch_uic_cmd()
2463 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); in ufshcd_dispatch_uic_cmd()
2465 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND); in ufshcd_dispatch_uic_cmd()
2468 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, in ufshcd_dispatch_uic_cmd()
2474 * @hba: per adapter instance
2480 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_wait_for_uic_cmd() argument
2485 lockdep_assert_held(&hba->uic_cmd_mutex); in ufshcd_wait_for_uic_cmd()
2492 dev_err(hba->dev, in ufshcd_wait_for_uic_cmd()
2497 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", in ufshcd_wait_for_uic_cmd()
2503 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
2504 hba->active_uic_cmd = NULL; in ufshcd_wait_for_uic_cmd()
2505 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
2512 * @hba: per adapter instance
2518 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in __ufshcd_send_uic_cmd() argument
2520 lockdep_assert_held(&hba->uic_cmd_mutex); in __ufshcd_send_uic_cmd()
2522 if (!ufshcd_ready_for_uic_cmd(hba)) { in __ufshcd_send_uic_cmd()
2523 dev_err(hba->dev, in __ufshcd_send_uic_cmd()
2531 ufshcd_dispatch_uic_cmd(hba, uic_cmd); in __ufshcd_send_uic_cmd()
2538 * @hba: per adapter instance
2543 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_send_uic_cmd() argument
2547 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) in ufshcd_send_uic_cmd()
2550 ufshcd_hold(hba); in ufshcd_send_uic_cmd()
2551 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2552 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_send_uic_cmd()
2554 ret = __ufshcd_send_uic_cmd(hba, uic_cmd); in ufshcd_send_uic_cmd()
2556 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); in ufshcd_send_uic_cmd()
2558 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2560 ufshcd_release(hba); in ufshcd_send_uic_cmd()
2566 * @hba: per-adapter instance
2571 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries, in ufshcd_sgl_to_prdt() argument
2580 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_sgl_to_prdt()
2582 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba)); in ufshcd_sgl_to_prdt()
2603 prd = (void *)prd + ufshcd_sg_entry_size(hba); in ufshcd_sgl_to_prdt()
2612 * @hba: per adapter instance
2617 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_map_sg() argument
2625 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd)); in ufshcd_map_sg()
2627 return ufshcd_crypto_fill_prdt(hba, lrbp); in ufshcd_map_sg()
2632 * @hba: per adapter instance
2635 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_enable_intr() argument
2637 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
2640 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
2645 * @hba: per adapter instance
2648 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_disable_intr() argument
2650 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
2653 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
2659 * @hba: per adapter instance
2666 ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, in ufshcd_prepare_req_desc_hdr() argument
2742 * @hba: UFS hba
2746 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, in ufshcd_prepare_utp_query_req_upiu() argument
2750 struct ufs_query *query = &hba->dev_cmd.query; in ufshcd_prepare_utp_query_req_upiu()
2796 * @hba: per adapter instance
2801 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, in ufshcd_compose_devman_upiu() argument
2807 ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0); in ufshcd_compose_devman_upiu()
2809 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) in ufshcd_compose_devman_upiu()
2810 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); in ufshcd_compose_devman_upiu()
2811 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) in ufshcd_compose_devman_upiu()
2822 * @hba: per adapter instance
2825 static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_comp_scsi_upiu() argument
2831 ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0); in ufshcd_comp_scsi_upiu()
2847 static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, in ufshcd_setup_scsi_cmd() argument
2851 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); in ufshcd_setup_scsi_cmd()
2854 ufshcd_comp_scsi_upiu(hba, lrbp); in ufshcd_setup_scsi_cmd()
2880 struct ufs_hba *hba = shost_priv(shost); in ufshcd_map_queues() local
2883 if (!is_mcq_supported(hba)) { in ufshcd_map_queues()
2884 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1; in ufshcd_map_queues()
2885 hba->nr_queues[HCTX_TYPE_READ] = 0; in ufshcd_map_queues()
2886 hba->nr_queues[HCTX_TYPE_POLL] = 1; in ufshcd_map_queues()
2887 hba->nr_hw_queues = 1; in ufshcd_map_queues()
2893 map->nr_queues = hba->nr_queues[i]; in ufshcd_map_queues()
2897 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba)) in ufshcd_map_queues()
2905 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) in ufshcd_init_lrb() argument
2907 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + in ufshcd_init_lrb()
2908 i * ufshcd_get_ucd_size(hba); in ufshcd_init_lrb()
2909 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; in ufshcd_init_lrb()
2910 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + in ufshcd_init_lrb()
2911 i * ufshcd_get_ucd_size(hba); in ufshcd_init_lrb()
2916 lrb->utrd_dma_addr = hba->utrdl_dma_addr + in ufshcd_init_lrb()
2935 struct ufs_hba *hba = shost_priv(host); in ufshcd_queuecommand() local
2941 switch (hba->ufshcd_state) { in ufshcd_queuecommand()
2952 if (ufshcd_eh_in_progress(hba)) { in ufshcd_queuecommand()
2960 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's in ufshcd_queuecommand()
2968 if (hba->pm_op_in_progress) { in ufshcd_queuecommand()
2969 hba->force_reset = true; in ufshcd_queuecommand()
2984 hba->req_abort_count = 0; in ufshcd_queuecommand()
2986 ufshcd_hold(hba); in ufshcd_queuecommand()
2988 lrbp = &hba->lrb[tag]; in ufshcd_queuecommand()
2990 ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag); in ufshcd_queuecommand()
2992 err = ufshcd_map_sg(hba, lrbp); in ufshcd_queuecommand()
2994 ufshcd_release(hba); in ufshcd_queuecommand()
2998 if (hba->mcq_enabled) in ufshcd_queuecommand()
2999 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); in ufshcd_queuecommand()
3001 ufshcd_send_command(hba, tag, hwq); in ufshcd_queuecommand()
3004 if (ufs_trigger_eh(hba)) { in ufshcd_queuecommand()
3007 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_queuecommand()
3008 ufshcd_schedule_eh_work(hba); in ufshcd_queuecommand()
3009 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_queuecommand()
3015 static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, in ufshcd_setup_dev_cmd() argument
3020 hba->dev_cmd.type = cmd_type; in ufshcd_setup_dev_cmd()
3023 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, in ufshcd_compose_dev_cmd() argument
3026 ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag); in ufshcd_compose_dev_cmd()
3028 return ufshcd_compose_devman_upiu(hba, lrbp); in ufshcd_compose_dev_cmd()
3045 * @hba: per adapter instance
3048 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) in ufshcd_clear_cmd() argument
3053 if (hba->mcq_enabled) { in ufshcd_clear_cmd()
3058 err = ufshcd_mcq_sq_cleanup(hba, task_tag); in ufshcd_clear_cmd()
3060 dev_err(hba->dev, "%s: failed tag=%d. err=%d\n", in ufshcd_clear_cmd()
3070 ufshcd_utrl_clear(hba, mask); in ufshcd_clear_cmd()
3076 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, in ufshcd_clear_cmd()
3082 * @hba: per adapter instance
3088 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_dev_cmd_completion() argument
3093 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_dev_cmd_completion()
3098 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { in ufshcd_dev_cmd_completion()
3100 dev_err(hba->dev, "%s: unexpected response %x\n", in ufshcd_dev_cmd_completion()
3108 err = ufshcd_copy_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
3111 dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n", in ufshcd_dev_cmd_completion()
3119 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", in ufshcd_dev_cmd_completion()
3123 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) { in ufshcd_dev_cmd_completion()
3125 dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp); in ufshcd_dev_cmd_completion()
3130 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", in ufshcd_dev_cmd_completion()
3138 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, in ufshcd_wait_for_dev_cmd() argument
3147 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, in ufshcd_wait_for_dev_cmd()
3156 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
3159 err = ufshcd_dev_cmd_completion(hba, lrbp); in ufshcd_wait_for_dev_cmd()
3162 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", in ufshcd_wait_for_dev_cmd()
3166 if (hba->mcq_enabled) { in ufshcd_wait_for_dev_cmd()
3168 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) in ufshcd_wait_for_dev_cmd()
3170 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
3175 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) { in ufshcd_wait_for_dev_cmd()
3183 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_wait_for_dev_cmd()
3185 &hba->outstanding_reqs); in ufshcd_wait_for_dev_cmd()
3187 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
3189 &hba->outstanding_reqs); in ufshcd_wait_for_dev_cmd()
3191 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_wait_for_dev_cmd()
3202 dev_err(hba->dev, "%s: failed to clear tag %d\n", in ufshcd_wait_for_dev_cmd()
3205 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_wait_for_dev_cmd()
3207 &hba->outstanding_reqs); in ufshcd_wait_for_dev_cmd()
3209 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
3210 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_wait_for_dev_cmd()
3226 static void ufshcd_dev_man_lock(struct ufs_hba *hba) in ufshcd_dev_man_lock() argument
3228 ufshcd_hold(hba); in ufshcd_dev_man_lock()
3229 mutex_lock(&hba->dev_cmd.lock); in ufshcd_dev_man_lock()
3230 down_read(&hba->clk_scaling_lock); in ufshcd_dev_man_lock()
3233 static void ufshcd_dev_man_unlock(struct ufs_hba *hba) in ufshcd_dev_man_unlock() argument
3235 up_read(&hba->clk_scaling_lock); in ufshcd_dev_man_unlock()
3236 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_dev_man_unlock()
3237 ufshcd_release(hba); in ufshcd_dev_man_unlock()
3240 static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, in ufshcd_issue_dev_cmd() argument
3246 hba->dev_cmd.complete = &wait; in ufshcd_issue_dev_cmd()
3248 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); in ufshcd_issue_dev_cmd()
3250 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); in ufshcd_issue_dev_cmd()
3251 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); in ufshcd_issue_dev_cmd()
3253 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, in ufshcd_issue_dev_cmd()
3261 * @hba: UFS hba
3268 * it is expected you hold the hba->dev_cmd.lock mutex.
3270 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, in ufshcd_exec_dev_cmd() argument
3273 const u32 tag = hba->reserved_slot; in ufshcd_exec_dev_cmd()
3274 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_exec_dev_cmd()
3277 /* Protects use of hba->reserved_slot. */ in ufshcd_exec_dev_cmd()
3278 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_exec_dev_cmd()
3280 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); in ufshcd_exec_dev_cmd()
3284 return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout); in ufshcd_exec_dev_cmd()
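Note: ufshcd_dev_man_lock(), ufshcd_exec_dev_cmd() and ufshcd_dev_man_unlock() above form the usual bracket around an internally issued device-management UPIU. A minimal sketch of that pattern, assuming the driver-internal declarations are in scope (the wrapper name is hypothetical; the same sequence appears in ufshcd_verify_dev_init() further down this listing):

	/* Illustrative sketch, not part of the source listing. */
	static int example_issue_nop(struct ufs_hba *hba)
	{
		int err;

		/* hold the HBA, take dev_cmd.lock and the clk-scaling read lock */
		ufshcd_dev_man_lock(hba);
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, hba->nop_out_timeout);
		ufshcd_dev_man_unlock(hba);

		return err;
	}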
3289 * @hba: per-adapter instance
3297 static inline void ufshcd_init_query(struct ufs_hba *hba, in ufshcd_init_query() argument
3301 *request = &hba->dev_cmd.query.request; in ufshcd_init_query()
3302 *response = &hba->dev_cmd.query.response; in ufshcd_init_query()
3311 static int ufshcd_query_flag_retry(struct ufs_hba *hba, in ufshcd_query_flag_retry() argument
3318 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res); in ufshcd_query_flag_retry()
3320 dev_dbg(hba->dev, in ufshcd_query_flag_retry()
3328 dev_err(hba->dev, in ufshcd_query_flag_retry()
3336 * @hba: per-adapter instance
3344 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_flag() argument
3352 BUG_ON(!hba); in ufshcd_query_flag()
3354 ufshcd_dev_man_lock(hba); in ufshcd_query_flag()
3356 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_flag()
3369 dev_err(hba->dev, "%s: Invalid argument for read request\n", in ufshcd_query_flag()
3376 dev_err(hba->dev, in ufshcd_query_flag()
3383 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); in ufshcd_query_flag()
3386 dev_err(hba->dev, in ufshcd_query_flag()
3397 ufshcd_dev_man_unlock(hba); in ufshcd_query_flag()
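A usage sketch for the flag interface above; the IDN chosen (fDeviceInit) is only an example, mirroring ufshcd_complete_dev_init() later in this listing:

	bool flag_res = false;
	int err;

	/* Illustrative: read fDeviceInit for index 0; result lands in flag_res. */
	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
	if (err)
		dev_err(hba->dev, "reading fDeviceInit failed: %d\n", err);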
3403 * @hba: per-adapter instance
3412 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_attr() argument
3419 BUG_ON(!hba); in ufshcd_query_attr()
3422 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", in ufshcd_query_attr()
3427 ufshcd_dev_man_lock(hba); in ufshcd_query_attr()
3429 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_attr()
3441 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", in ufshcd_query_attr()
3447 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_attr()
3450 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in ufshcd_query_attr()
3458 ufshcd_dev_man_unlock(hba); in ufshcd_query_attr()
3465 * @hba: per-adapter instance
3475 int ufshcd_query_attr_retry(struct ufs_hba *hba, in ufshcd_query_attr_retry() argument
3483 ret = ufshcd_query_attr(hba, opcode, idn, index, in ufshcd_query_attr_retry()
3486 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", in ufshcd_query_attr_retry()
3493 dev_err(hba->dev, in ufshcd_query_attr_retry()
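The attribute path is used the same way; a sketch only, with bBackgroundOpStatus picked as the example IDN because the BKOPS helpers further down read it through this wrapper:

	u32 bkops_status = 0;
	int err;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
				      &bkops_status);
	if (!err)
		dev_dbg(hba->dev, "bBackgroundOpStatus = %u\n", bkops_status);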
3499 static int __ufshcd_query_descriptor(struct ufs_hba *hba, in __ufshcd_query_descriptor() argument
3507 BUG_ON(!hba); in __ufshcd_query_descriptor()
3510 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", in __ufshcd_query_descriptor()
3516 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", in __ufshcd_query_descriptor()
3521 ufshcd_dev_man_lock(hba); in __ufshcd_query_descriptor()
3523 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in __ufshcd_query_descriptor()
3525 hba->dev_cmd.query.descriptor = desc_buf; in __ufshcd_query_descriptor()
3536 dev_err(hba->dev, in __ufshcd_query_descriptor()
3543 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in __ufshcd_query_descriptor()
3546 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in __ufshcd_query_descriptor()
3554 hba->dev_cmd.query.descriptor = NULL; in __ufshcd_query_descriptor()
3555 ufshcd_dev_man_unlock(hba); in __ufshcd_query_descriptor()
3561 * @hba: per-adapter instance
3574 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, in ufshcd_query_descriptor_retry() argument
3584 err = __ufshcd_query_descriptor(hba, opcode, idn, index, in ufshcd_query_descriptor_retry()
3595 * @hba: Pointer to adapter instance
3604 int ufshcd_read_desc_param(struct ufs_hba *hba, in ufshcd_read_desc_param() argument
3631 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, in ufshcd_read_desc_param()
3635 …dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret … in ufshcd_read_desc_param()
3644 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", in ufshcd_read_desc_param()
3652 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", in ufshcd_read_desc_param()
3693 * @hba: pointer to adapter instance
3705 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, in ufshcd_read_string_desc() argument
3719 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, in ufshcd_read_string_desc()
3722 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", in ufshcd_read_string_desc()
3729 dev_dbg(hba->dev, "String Desc is of zero length\n"); in ufshcd_read_string_desc()
3776 * @hba: Pointer to adapter instance
3784 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, in ufshcd_read_unit_desc_param() argument
3794 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) in ufshcd_read_unit_desc_param()
3797 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, in ufshcd_read_unit_desc_param()
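A sketch of pulling a single byte out of a unit descriptor with the helper above; the lun variable is assumed to be in scope, and the offset shown is the queue-depth parameter that ufshcd_lu_init() below also reads:

	u8 lun_qdepth = 0;
	int err;

	err = ufshcd_read_unit_desc_param(hba, lun, UNIT_DESC_PARAM_LU_Q_DEPTH,
					  &lun_qdepth, sizeof(lun_qdepth));
	if (err)
		dev_dbg(hba->dev, "unit descriptor read failed: %d\n", err);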
3801 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba) in ufshcd_get_ref_clk_gating_wait() argument
3806 if (hba->dev_info.wspecversion >= 0x300) { in ufshcd_get_ref_clk_gating_wait()
3807 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ref_clk_gating_wait()
3811 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3816 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3820 hba->dev_info.clk_gating_wait_us = gating_wait; in ufshcd_get_ref_clk_gating_wait()
3828 * @hba: per adapter instance
3839 static int ufshcd_memory_alloc(struct ufs_hba *hba) in ufshcd_memory_alloc() argument
3844 ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs; in ufshcd_memory_alloc()
3845 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3847 &hba->ucdl_dma_addr, in ufshcd_memory_alloc()
3853 if (!hba->ucdl_base_addr || in ufshcd_memory_alloc()
3854 WARN_ON(hba->ucdl_dma_addr & (128 - 1))) { in ufshcd_memory_alloc()
3855 dev_err(hba->dev, in ufshcd_memory_alloc()
3864 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); in ufshcd_memory_alloc()
3865 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3867 &hba->utrdl_dma_addr, in ufshcd_memory_alloc()
3869 if (!hba->utrdl_base_addr || in ufshcd_memory_alloc()
3870 WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) { in ufshcd_memory_alloc()
3871 dev_err(hba->dev, in ufshcd_memory_alloc()
3882 if (hba->utmrdl_base_addr) in ufshcd_memory_alloc()
3888 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; in ufshcd_memory_alloc()
3889 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3891 &hba->utmrdl_dma_addr, in ufshcd_memory_alloc()
3893 if (!hba->utmrdl_base_addr || in ufshcd_memory_alloc()
3894 WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) { in ufshcd_memory_alloc()
3895 dev_err(hba->dev, in ufshcd_memory_alloc()
3902 hba->lrb = devm_kcalloc(hba->dev, in ufshcd_memory_alloc()
3903 hba->nutrs, sizeof(struct ufshcd_lrb), in ufshcd_memory_alloc()
3905 if (!hba->lrb) { in ufshcd_memory_alloc()
3906 dev_err(hba->dev, "LRB Memory allocation failed\n"); in ufshcd_memory_alloc()
3917 * @hba: per adapter instance
3927 static void ufshcd_host_memory_configure(struct ufs_hba *hba) in ufshcd_host_memory_configure() argument
3937 utrdlp = hba->utrdl_base_addr; in ufshcd_host_memory_configure()
3944 cmd_desc_size = ufshcd_get_ucd_size(hba); in ufshcd_host_memory_configure()
3945 cmd_desc_dma_addr = hba->ucdl_dma_addr; in ufshcd_host_memory_configure()
3947 for (i = 0; i < hba->nutrs; i++) { in ufshcd_host_memory_configure()
3955 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { in ufshcd_host_memory_configure()
3971 ufshcd_init_lrb(hba, &hba->lrb[i], i); in ufshcd_host_memory_configure()
3977 * @hba: per adapter instance
3986 static int ufshcd_dme_link_startup(struct ufs_hba *hba) in ufshcd_dme_link_startup() argument
3993 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_link_startup()
3995 dev_dbg(hba->dev, in ufshcd_dme_link_startup()
4001 * @hba: per adapter instance
4008 static int ufshcd_dme_reset(struct ufs_hba *hba) in ufshcd_dme_reset() argument
4015 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_reset()
4017 dev_err(hba->dev, in ufshcd_dme_reset()
4023 int ufshcd_dme_configure_adapt(struct ufs_hba *hba, in ufshcd_dme_configure_adapt() argument
4032 ret = ufshcd_dme_set(hba, in ufshcd_dme_configure_adapt()
4041 * @hba: per adapter instance
4047 static int ufshcd_dme_enable(struct ufs_hba *hba) in ufshcd_dme_enable() argument
4054 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_enable()
4056 dev_err(hba->dev, in ufshcd_dme_enable()
4062 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) in ufshcd_add_delay_before_dme_cmd() argument
4067 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) in ufshcd_add_delay_before_dme_cmd()
4074 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { in ufshcd_add_delay_before_dme_cmd()
4080 hba->last_dme_cmd_tstamp)); in ufshcd_add_delay_before_dme_cmd()
4095 hba->last_dme_cmd_tstamp = ktime_get(); in ufshcd_add_delay_before_dme_cmd()
4100 * @hba: per adapter instance
4108 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_set_attr() argument
4127 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_set_attr()
4129 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", in ufshcd_dme_set_attr()
4134 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", in ufshcd_dme_set_attr()
4144 * @hba: per adapter instance
4151 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_get_attr() argument
4169 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { in ufshcd_dme_get_attr()
4170 orig_pwr_info = hba->pwr_info; in ufshcd_dme_get_attr()
4185 ret = ufshcd_change_power_mode(hba, &temp_pwr_info); in ufshcd_dme_get_attr()
4193 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_get_attr()
4195 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", in ufshcd_dme_get_attr()
4200 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", in ufshcd_dme_get_attr()
4207 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) in ufshcd_dme_get_attr()
4209 ufshcd_change_power_mode(hba, &orig_pwr_info); in ufshcd_dme_get_attr()
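The ufshcd_dme_set()/ufshcd_dme_get() wrappers around these two functions take a MIB attribute encoded with UIC_ARG_MIB(); a sketch with an illustrative attribute choice (setting PA_TXGEAR only stages the gear, which takes effect at the next power-mode change):

	u32 tx_lanes = 0;

	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &tx_lanes);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G3);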
4219 * @hba: per adapter instance
4231 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) in ufshcd_uic_pwr_ctrl() argument
4239 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
4240 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_uic_pwr_ctrl()
4242 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4243 if (ufshcd_is_link_broken(hba)) { in ufshcd_uic_pwr_ctrl()
4247 hba->uic_async_done = &uic_async_done; in ufshcd_uic_pwr_ctrl()
4248 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { in ufshcd_uic_pwr_ctrl()
4249 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); in ufshcd_uic_pwr_ctrl()
4254 ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_uic_pwr_ctrl()
4257 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4258 ret = __ufshcd_send_uic_cmd(hba, cmd); in ufshcd_uic_pwr_ctrl()
4260 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4266 if (!wait_for_completion_timeout(hba->uic_async_done, in ufshcd_uic_pwr_ctrl()
4268 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4273 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n", in ufshcd_uic_pwr_ctrl()
4283 status = ufshcd_get_upmcrs(hba); in ufshcd_uic_pwr_ctrl()
4285 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4292 ufshcd_print_host_state(hba); in ufshcd_uic_pwr_ctrl()
4293 ufshcd_print_pwr_info(hba); in ufshcd_uic_pwr_ctrl()
4294 ufshcd_print_evt_hist(hba); in ufshcd_uic_pwr_ctrl()
4297 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4298 hba->active_uic_cmd = NULL; in ufshcd_uic_pwr_ctrl()
4299 hba->uic_async_done = NULL; in ufshcd_uic_pwr_ctrl()
4301 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); in ufshcd_uic_pwr_ctrl()
4303 ufshcd_set_link_broken(hba); in ufshcd_uic_pwr_ctrl()
4304 ufshcd_schedule_eh_work(hba); in ufshcd_uic_pwr_ctrl()
4307 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4308 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
4315 * @hba: per adapter instance
4320 int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_send_bsg_uic_cmd() argument
4324 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) in ufshcd_send_bsg_uic_cmd()
4327 ufshcd_hold(hba); in ufshcd_send_bsg_uic_cmd()
4331 ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd); in ufshcd_send_bsg_uic_cmd()
4335 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_send_bsg_uic_cmd()
4336 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_send_bsg_uic_cmd()
4338 ret = __ufshcd_send_uic_cmd(hba, uic_cmd); in ufshcd_send_bsg_uic_cmd()
4340 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); in ufshcd_send_bsg_uic_cmd()
4342 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_send_bsg_uic_cmd()
4345 ufshcd_release(hba); in ufshcd_send_bsg_uic_cmd()
4352 * @hba: per adapter instance
4357 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) in ufshcd_uic_change_pwr_mode() argument
4366 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { in ufshcd_uic_change_pwr_mode()
4367 ret = ufshcd_dme_set(hba, in ufshcd_uic_change_pwr_mode()
4370 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", in ufshcd_uic_change_pwr_mode()
4376 ufshcd_hold(hba); in ufshcd_uic_change_pwr_mode()
4377 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_change_pwr_mode()
4378 ufshcd_release(hba); in ufshcd_uic_change_pwr_mode()
4385 int ufshcd_link_recovery(struct ufs_hba *hba) in ufshcd_link_recovery() argument
4390 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4391 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_link_recovery()
4392 ufshcd_set_eh_in_progress(hba); in ufshcd_link_recovery()
4393 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4396 ufshcd_device_reset(hba); in ufshcd_link_recovery()
4398 ret = ufshcd_host_reset_and_restore(hba); in ufshcd_link_recovery()
4400 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4402 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_link_recovery()
4403 ufshcd_clear_eh_in_progress(hba); in ufshcd_link_recovery()
4404 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4407 dev_err(hba->dev, "%s: link recovery failed, err %d", in ufshcd_link_recovery()
4414 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) in ufshcd_uic_hibern8_enter() argument
4422 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); in ufshcd_uic_hibern8_enter()
4424 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_enter()
4425 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", in ufshcd_uic_hibern8_enter()
4429 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", in ufshcd_uic_hibern8_enter()
4432 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, in ufshcd_uic_hibern8_enter()
4439 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) in ufshcd_uic_hibern8_exit() argument
4447 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); in ufshcd_uic_hibern8_exit()
4449 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_exit()
4450 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", in ufshcd_uic_hibern8_exit()
4454 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", in ufshcd_uic_hibern8_exit()
4457 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, in ufshcd_uic_hibern8_exit()
4459 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock(); in ufshcd_uic_hibern8_exit()
4460 hba->ufs_stats.hibern8_exit_cnt++; in ufshcd_uic_hibern8_exit()
4467 static void ufshcd_configure_auto_hibern8(struct ufs_hba *hba) in ufshcd_configure_auto_hibern8() argument
4469 if (!ufshcd_is_auto_hibern8_supported(hba)) in ufshcd_configure_auto_hibern8()
4472 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); in ufshcd_configure_auto_hibern8()
4475 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) in ufshcd_auto_hibern8_update() argument
4477 const u32 cur_ahit = READ_ONCE(hba->ahit); in ufshcd_auto_hibern8_update()
4479 if (!ufshcd_is_auto_hibern8_supported(hba) || cur_ahit == ahit) in ufshcd_auto_hibern8_update()
4482 WRITE_ONCE(hba->ahit, ahit); in ufshcd_auto_hibern8_update()
4483 if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) { in ufshcd_auto_hibern8_update()
4484 ufshcd_rpm_get_sync(hba); in ufshcd_auto_hibern8_update()
4485 ufshcd_hold(hba); in ufshcd_auto_hibern8_update()
4486 ufshcd_configure_auto_hibern8(hba); in ufshcd_auto_hibern8_update()
4487 ufshcd_release(hba); in ufshcd_auto_hibern8_update()
4488 ufshcd_rpm_put_sync(hba); in ufshcd_auto_hibern8_update()
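A sketch of the caller side: a value of 0 disables the auto-hibernate idle timer, and non-zero values encode a count and scale per the UFSHCI AHIT register layout (the encoding helpers themselves are not shown in this listing):

	/* Illustrative only: turn the auto-hibernate idle timer off. */
	ufshcd_auto_hibern8_update(hba, 0);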
4495 * values in hba power info
4496 * @hba: per-adapter instance
4498 static void ufshcd_init_pwr_info(struct ufs_hba *hba) in ufshcd_init_pwr_info() argument
4500 hba->pwr_info.gear_rx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4501 hba->pwr_info.gear_tx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4502 hba->pwr_info.lane_rx = UFS_LANE_1; in ufshcd_init_pwr_info()
4503 hba->pwr_info.lane_tx = UFS_LANE_1; in ufshcd_init_pwr_info()
4504 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4505 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4506 hba->pwr_info.hs_rate = 0; in ufshcd_init_pwr_info()
4511 * @hba: per-adapter instance
4515 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) in ufshcd_get_max_pwr_mode() argument
4517 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; in ufshcd_get_max_pwr_mode()
4519 if (hba->max_pwr_info.is_valid) in ufshcd_get_max_pwr_mode()
4522 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { in ufshcd_get_max_pwr_mode()
4532 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), in ufshcd_get_max_pwr_mode()
4534 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_get_max_pwr_mode()
4538 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", in ufshcd_get_max_pwr_mode()
4546 dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n", in ufshcd_get_max_pwr_mode()
4558 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); in ufshcd_get_max_pwr_mode()
4560 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
4563 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4570 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), in ufshcd_get_max_pwr_mode()
4573 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
4576 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4583 hba->max_pwr_info.is_valid = true; in ufshcd_get_max_pwr_mode()
4587 static int ufshcd_change_power_mode(struct ufs_hba *hba, in ufshcd_change_power_mode() argument
4593 if (!hba->force_pmc && in ufshcd_change_power_mode()
4594 pwr_mode->gear_rx == hba->pwr_info.gear_rx && in ufshcd_change_power_mode()
4595 pwr_mode->gear_tx == hba->pwr_info.gear_tx && in ufshcd_change_power_mode()
4596 pwr_mode->lane_rx == hba->pwr_info.lane_rx && in ufshcd_change_power_mode()
4597 pwr_mode->lane_tx == hba->pwr_info.lane_tx && in ufshcd_change_power_mode()
4598 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && in ufshcd_change_power_mode()
4599 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && in ufshcd_change_power_mode()
4600 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { in ufshcd_change_power_mode()
4601 dev_dbg(hba->dev, "%s: power already configured\n", __func__); in ufshcd_change_power_mode()
4611 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); in ufshcd_change_power_mode()
4612 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), in ufshcd_change_power_mode()
4616 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); in ufshcd_change_power_mode()
4618 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false); in ufshcd_change_power_mode()
4620 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); in ufshcd_change_power_mode()
4621 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), in ufshcd_change_power_mode()
4625 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); in ufshcd_change_power_mode()
4627 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false); in ufshcd_change_power_mode()
4633 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), in ufshcd_change_power_mode()
4636 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { in ufshcd_change_power_mode()
4637 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), in ufshcd_change_power_mode()
4639 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), in ufshcd_change_power_mode()
4641 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), in ufshcd_change_power_mode()
4643 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), in ufshcd_change_power_mode()
4645 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), in ufshcd_change_power_mode()
4647 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), in ufshcd_change_power_mode()
4650 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), in ufshcd_change_power_mode()
4652 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), in ufshcd_change_power_mode()
4654 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), in ufshcd_change_power_mode()
4658 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 in ufshcd_change_power_mode()
4662 dev_err(hba->dev, in ufshcd_change_power_mode()
4665 memcpy(&hba->pwr_info, pwr_mode, in ufshcd_change_power_mode()
4674 * @hba: per-adapter instance
4679 int ufshcd_config_pwr_mode(struct ufs_hba *hba, in ufshcd_config_pwr_mode() argument
4685 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, in ufshcd_config_pwr_mode()
4691 ret = ufshcd_change_power_mode(hba, &final_params); in ufshcd_config_pwr_mode()
4694 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, in ufshcd_config_pwr_mode()
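Combined with ufshcd_get_max_pwr_mode() above, switching to the fastest negotiated settings looks, as a sketch, like the following (the error handling is illustrative, roughly what the probe path does):

	if (!ufshcd_get_max_pwr_mode(hba)) {
		int ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);

		if (ret)
			dev_err(hba->dev, "power mode change failed: %d\n", ret);
	}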
4703 * @hba: per-adapter instance
4709 static int ufshcd_complete_dev_init(struct ufs_hba *hba) in ufshcd_complete_dev_init() argument
4715 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_complete_dev_init()
4718 dev_err(hba->dev, in ufshcd_complete_dev_init()
4727 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_complete_dev_init()
4735 dev_err(hba->dev, in ufshcd_complete_dev_init()
4739 dev_err(hba->dev, in ufshcd_complete_dev_init()
4750 * @hba: per adapter instance
4760 int ufshcd_make_hba_operational(struct ufs_hba *hba) in ufshcd_make_hba_operational() argument
4766 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); in ufshcd_make_hba_operational()
4769 if (ufshcd_is_intr_aggr_allowed(hba)) in ufshcd_make_hba_operational()
4770 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); in ufshcd_make_hba_operational()
4772 ufshcd_disable_intr_aggr(hba); in ufshcd_make_hba_operational()
4775 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4777 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4779 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4781 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4787 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); in ufshcd_make_hba_operational()
4789 ufshcd_enable_run_stop_reg(hba); in ufshcd_make_hba_operational()
4791 dev_err(hba->dev, in ufshcd_make_hba_operational()
4802 * @hba: per adapter instance
4804 void ufshcd_hba_stop(struct ufs_hba *hba) in ufshcd_hba_stop() argument
4808 ufshcd_disable_irq(hba); in ufshcd_hba_stop()
4809 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); in ufshcd_hba_stop()
4810 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, in ufshcd_hba_stop()
4813 ufshcd_enable_irq(hba); in ufshcd_hba_stop()
4815 dev_err(hba->dev, "%s: Controller disable failed\n", __func__); in ufshcd_hba_stop()
4821 * @hba: per adapter instance
4829 static int ufshcd_hba_execute_hce(struct ufs_hba *hba) in ufshcd_hba_execute_hce() argument
4834 if (ufshcd_is_hba_active(hba)) in ufshcd_hba_execute_hce()
4836 ufshcd_hba_stop(hba); in ufshcd_hba_execute_hce()
4839 ufshcd_set_link_off(hba); in ufshcd_hba_execute_hce()
4841 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_execute_hce()
4844 ufshcd_hba_start(hba); in ufshcd_hba_execute_hce()
4856 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); in ufshcd_hba_execute_hce()
4859 if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE, in ufshcd_hba_execute_hce()
4863 dev_err(hba->dev, "Enabling the controller failed\n"); in ufshcd_hba_execute_hce()
4870 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_execute_hce()
4872 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_execute_hce()
4877 int ufshcd_hba_enable(struct ufs_hba *hba) in ufshcd_hba_enable() argument
4881 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { in ufshcd_hba_enable()
4882 ufshcd_set_link_off(hba); in ufshcd_hba_enable()
4883 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_enable()
4886 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_enable()
4887 ret = ufshcd_dme_reset(hba); in ufshcd_hba_enable()
4889 dev_err(hba->dev, "DME_RESET failed\n"); in ufshcd_hba_enable()
4893 ret = ufshcd_dme_enable(hba); in ufshcd_hba_enable()
4895 dev_err(hba->dev, "Enabling DME failed\n"); in ufshcd_hba_enable()
4899 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_enable()
4901 ret = ufshcd_hba_execute_hce(hba); in ufshcd_hba_enable()
4908 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) in ufshcd_disable_tx_lcc() argument
4913 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
4916 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
4920 err = ufshcd_dme_set(hba, in ufshcd_disable_tx_lcc()
4925 err = ufshcd_dme_peer_set(hba, in ufshcd_disable_tx_lcc()
4930 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", in ufshcd_disable_tx_lcc()
4939 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) in ufshcd_disable_device_tx_lcc() argument
4941 return ufshcd_disable_tx_lcc(hba, true); in ufshcd_disable_device_tx_lcc()
4944 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val) in ufshcd_update_evt_hist() argument
4951 e = &hba->ufs_stats.event[id]; in ufshcd_update_evt_hist()
4957 ufshcd_vops_event_notify(hba, id, &val); in ufshcd_update_evt_hist()
4963 * @hba: per adapter instance
4967 static int ufshcd_link_startup(struct ufs_hba *hba) in ufshcd_link_startup() argument
4977 if (!ufshcd_is_ufs_dev_active(hba)) in ufshcd_link_startup()
4982 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); in ufshcd_link_startup()
4984 ret = ufshcd_dme_link_startup(hba); in ufshcd_link_startup()
4987 if (!ret && !ufshcd_is_device_present(hba)) { in ufshcd_link_startup()
4988 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4991 dev_err(hba->dev, "%s: Device not present\n", __func__); in ufshcd_link_startup()
5001 if (ret && retries && ufshcd_hba_enable(hba)) { in ufshcd_link_startup()
5002 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
5011 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
5024 ufshcd_init_pwr_info(hba); in ufshcd_link_startup()
5025 ufshcd_print_pwr_info(hba); in ufshcd_link_startup()
5027 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { in ufshcd_link_startup()
5028 ret = ufshcd_disable_device_tx_lcc(hba); in ufshcd_link_startup()
5034 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); in ufshcd_link_startup()
5039 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); in ufshcd_link_startup()
5040 ret = ufshcd_make_hba_operational(hba); in ufshcd_link_startup()
5043 dev_err(hba->dev, "link startup failed %d\n", ret); in ufshcd_link_startup()
5044 ufshcd_print_host_state(hba); in ufshcd_link_startup()
5045 ufshcd_print_pwr_info(hba); in ufshcd_link_startup()
5046 ufshcd_print_evt_hist(hba); in ufshcd_link_startup()
5053 * @hba: per-adapter instance
5063 static int ufshcd_verify_dev_init(struct ufs_hba *hba) in ufshcd_verify_dev_init() argument
5068 ufshcd_dev_man_lock(hba); in ufshcd_verify_dev_init()
5071 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, in ufshcd_verify_dev_init()
5072 hba->nop_out_timeout); in ufshcd_verify_dev_init()
5077 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); in ufshcd_verify_dev_init()
5080 ufshcd_dev_man_unlock(hba); in ufshcd_verify_dev_init()
5083 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); in ufshcd_verify_dev_init()
5090 * @hba: pointer to ufs hba
5092 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_setup_links() argument
5100 if (hba->ufs_device_wlun) { in ufshcd_setup_links()
5102 &hba->ufs_device_wlun->sdev_gendev, in ufshcd_setup_links()
5106 dev_name(&hba->ufs_device_wlun->sdev_gendev)); in ufshcd_setup_links()
5109 hba->luns_avail--; in ufshcd_setup_links()
5111 if (hba->luns_avail == 1) { in ufshcd_setup_links()
5112 ufshcd_rpm_put(hba); in ufshcd_setup_links()
5120 hba->luns_avail--; in ufshcd_setup_links()
5126 * @hba: per-adapter instance
5129 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_lu_init() argument
5133 u8 lun_qdepth = hba->nutrs; in ufshcd_lu_init()
5141 ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len); in ufshcd_lu_init()
5155 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs); in ufshcd_lu_init()
5161 if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported && in ufshcd_lu_init()
5162 !hba->dev_info.is_lu_power_on_wp && in ufshcd_lu_init()
5164 hba->dev_info.is_lu_power_on_wp = true; in ufshcd_lu_init()
5169 hba->dev_info.b_advanced_rpmb_en = true; in ufshcd_lu_init()
5178 dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth); in ufshcd_lu_init()
5190 struct ufs_hba *hba; in ufshcd_sdev_init() local
5192 hba = shost_priv(sdev->host); in ufshcd_sdev_init()
5209 ufshcd_lu_init(hba, sdev); in ufshcd_sdev_init()
5211 ufshcd_setup_links(hba, sdev); in ufshcd_sdev_init()
5240 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_sdev_configure() local
5251 else if (ufshcd_is_rpm_autosuspend_allowed(hba)) in ufshcd_sdev_configure()
5260 if (hba->vops && hba->vops->config_scsi_dev) in ufshcd_sdev_configure()
5261 hba->vops->config_scsi_dev(sdev); in ufshcd_sdev_configure()
5263 ufshcd_crypto_register(hba, q); in ufshcd_sdev_configure()
5274 struct ufs_hba *hba; in ufshcd_sdev_destroy() local
5277 hba = shost_priv(sdev->host); in ufshcd_sdev_destroy()
5281 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_sdev_destroy()
5282 hba->ufs_device_wlun = NULL; in ufshcd_sdev_destroy()
5283 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_sdev_destroy()
5284 } else if (hba->ufs_device_wlun) { in ufshcd_sdev_destroy()
5288 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_sdev_destroy()
5289 if (hba->ufs_device_wlun) { in ufshcd_sdev_destroy()
5290 supplier = &hba->ufs_device_wlun->sdev_gendev; in ufshcd_sdev_destroy()
5293 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_sdev_destroy()
5342 * @hba: per adapter instance
5349 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, in ufshcd_transfer_rsp_status() argument
5370 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { in ufshcd_transfer_rsp_status()
5378 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_transfer_rsp_status()
5400 if (!hba->pm_op_in_progress && in ufshcd_transfer_rsp_status()
5401 !ufshcd_eh_in_progress(hba) && in ufshcd_transfer_rsp_status()
5404 schedule_work(&hba->eeh_work); in ufshcd_transfer_rsp_status()
5409 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5413 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5423 dev_warn(hba->dev, in ufshcd_transfer_rsp_status()
5439 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5442 ufshcd_print_evt_hist(hba); in ufshcd_transfer_rsp_status()
5443 ufshcd_print_host_state(hba); in ufshcd_transfer_rsp_status()
5448 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) in ufshcd_transfer_rsp_status()
5449 ufshcd_print_tr(hba, lrbp->task_tag, true); in ufshcd_transfer_rsp_status()
5453 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, in ufshcd_is_auto_hibern8_error() argument
5456 if (!ufshcd_is_auto_hibern8_supported(hba) || in ufshcd_is_auto_hibern8_error()
5457 !ufshcd_is_auto_hibern8_enabled(hba)) in ufshcd_is_auto_hibern8_error()
5463 if (hba->active_uic_cmd && in ufshcd_is_auto_hibern8_error()
5464 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || in ufshcd_is_auto_hibern8_error()
5465 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) in ufshcd_is_auto_hibern8_error()
5473 * @hba: per adapter instance
5480 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) in ufshcd_uic_cmd_compl() argument
5485 spin_lock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5486 cmd = hba->active_uic_cmd; in ufshcd_uic_cmd_compl()
5490 if (ufshcd_is_auto_hibern8_error(hba, intr_status)) in ufshcd_uic_cmd_compl()
5491 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); in ufshcd_uic_cmd_compl()
5494 cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); in ufshcd_uic_cmd_compl()
5495 cmd->argument3 = ufshcd_get_dme_attr_val(hba); in ufshcd_uic_cmd_compl()
5496 if (!hba->uic_async_done) in ufshcd_uic_cmd_compl()
5502 if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) { in ufshcd_uic_cmd_compl()
5504 complete(hba->uic_async_done); in ufshcd_uic_cmd_compl()
5509 ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP); in ufshcd_uic_cmd_compl()
5512 spin_unlock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5518 void ufshcd_release_scsi_cmd(struct ufs_hba *hba, in ufshcd_release_scsi_cmd() argument
5524 ufshcd_crypto_clear_prdt(hba, lrbp); in ufshcd_release_scsi_cmd()
5525 ufshcd_release(hba); in ufshcd_release_scsi_cmd()
5526 ufshcd_clk_scaling_update_busy(hba); in ufshcd_release_scsi_cmd()
5531 * @hba: per adapter instance
5535 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, in ufshcd_compl_one_cqe() argument
5542 lrbp = &hba->lrb[task_tag]; in ufshcd_compl_one_cqe()
5547 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) in ufshcd_compl_one_cqe()
5548 ufshcd_update_monitor(hba, lrbp); in ufshcd_compl_one_cqe()
5549 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP); in ufshcd_compl_one_cqe()
5550 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe); in ufshcd_compl_one_cqe()
5551 ufshcd_release_scsi_cmd(hba, lrbp); in ufshcd_compl_one_cqe()
5554 } else if (hba->dev_cmd.complete) { in ufshcd_compl_one_cqe()
5559 complete(hba->dev_cmd.complete); in ufshcd_compl_one_cqe()
5565 * @hba: per adapter instance
5568 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, in __ufshcd_transfer_req_compl() argument
5573 for_each_set_bit(tag, &completed_reqs, hba->nutrs) in __ufshcd_transfer_req_compl()
5574 ufshcd_compl_one_cqe(hba, tag, NULL); in __ufshcd_transfer_req_compl()
5582 static void ufshcd_clear_polled(struct ufs_hba *hba, in ufshcd_clear_polled() argument
5587 for_each_set_bit(tag, completed_reqs, hba->nutrs) { in ufshcd_clear_polled()
5588 struct scsi_cmnd *cmd = hba->lrb[tag].cmd; in ufshcd_clear_polled()
5603 struct ufs_hba *hba = shost_priv(shost); in ufshcd_poll() local
5608 if (hba->mcq_enabled) { in ufshcd_poll()
5609 hwq = &hba->uhq[queue_num]; in ufshcd_poll()
5611 return ufshcd_mcq_poll_cqe_lock(hba, hwq); in ufshcd_poll()
5614 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_poll()
5615 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_poll()
5616 completed_reqs = ~tr_doorbell & hba->outstanding_reqs; in ufshcd_poll()
5617 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, in ufshcd_poll()
5619 hba->outstanding_reqs); in ufshcd_poll()
5622 ufshcd_clear_polled(hba, &completed_reqs); in ufshcd_poll()
5624 hba->outstanding_reqs &= ~completed_reqs; in ufshcd_poll()
5625 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_poll()
5628 __ufshcd_transfer_req_compl(hba, completed_reqs); in ufshcd_poll()
5639 * @hba: per adapter instance
5644 static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba, in ufshcd_mcq_compl_pending_transfer() argument
5653 for (tag = 0; tag < hba->nutrs; tag++) { in ufshcd_mcq_compl_pending_transfer()
5654 lrbp = &hba->lrb[tag]; in ufshcd_mcq_compl_pending_transfer()
5660 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); in ufshcd_mcq_compl_pending_transfer()
5663 ufshcd_mcq_compl_all_cqes_lock(hba, hwq); in ufshcd_mcq_compl_pending_transfer()
5671 ufshcd_release_scsi_cmd(hba, lrbp); in ufshcd_mcq_compl_pending_transfer()
5676 ufshcd_mcq_poll_cqe_lock(hba, hwq); in ufshcd_mcq_compl_pending_transfer()
5683 * @hba: per adapter instance
5689 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) in ufshcd_transfer_req_compl() argument
5698 if (ufshcd_is_intr_aggr_allowed(hba) && in ufshcd_transfer_req_compl()
5699 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) in ufshcd_transfer_req_compl()
5700 ufshcd_reset_intr_aggr(hba); in ufshcd_transfer_req_compl()
5702 if (ufs_fail_completion(hba)) in ufshcd_transfer_req_compl()
5709 ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT); in ufshcd_transfer_req_compl()
5714 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask) in __ufshcd_write_ee_control() argument
5716 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in __ufshcd_write_ee_control()
5721 int ufshcd_write_ee_control(struct ufs_hba *hba) in ufshcd_write_ee_control() argument
5725 mutex_lock(&hba->ee_ctrl_mutex); in ufshcd_write_ee_control()
5726 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask); in ufshcd_write_ee_control()
5727 mutex_unlock(&hba->ee_ctrl_mutex); in ufshcd_write_ee_control()
5729 dev_err(hba->dev, "%s: failed to write ee control %d\n", in ufshcd_write_ee_control()
5734 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, in ufshcd_update_ee_control() argument
5740 mutex_lock(&hba->ee_ctrl_mutex); in ufshcd_update_ee_control()
5743 if (ee_ctrl_mask != hba->ee_ctrl_mask) in ufshcd_update_ee_control()
5744 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask); in ufshcd_update_ee_control()
5747 hba->ee_ctrl_mask = ee_ctrl_mask; in ufshcd_update_ee_control()
5750 mutex_unlock(&hba->ee_ctrl_mutex); in ufshcd_update_ee_control()
5756 * @hba: per-adapter instance
5764 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_disable_ee() argument
5766 return ufshcd_update_ee_drv_mask(hba, 0, mask); in ufshcd_disable_ee()
5771 * @hba: per-adapter instance
5779 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_enable_ee() argument
5781 return ufshcd_update_ee_drv_mask(hba, mask, 0); in ufshcd_enable_ee()
5786 * @hba: per-adapter instance
5795 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) in ufshcd_enable_auto_bkops() argument
5799 if (hba->auto_bkops_enabled) in ufshcd_enable_auto_bkops()
5802 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_enable_auto_bkops()
5805 dev_err(hba->dev, "%s: failed to enable bkops %d\n", in ufshcd_enable_auto_bkops()
5810 hba->auto_bkops_enabled = true; in ufshcd_enable_auto_bkops()
5811 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); in ufshcd_enable_auto_bkops()
5814 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_enable_auto_bkops()
5816 dev_err(hba->dev, "%s: failed to disable exception event %d\n", in ufshcd_enable_auto_bkops()
5824 * @hba: per-adapter instance
5834 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) in ufshcd_disable_auto_bkops() argument
5838 if (!hba->auto_bkops_enabled) in ufshcd_disable_auto_bkops()
5845 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
5847 dev_err(hba->dev, "%s: failed to enable exception event %d\n", in ufshcd_disable_auto_bkops()
5852 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, in ufshcd_disable_auto_bkops()
5855 dev_err(hba->dev, "%s: failed to disable bkops %d\n", in ufshcd_disable_auto_bkops()
5857 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
5861 hba->auto_bkops_enabled = false; in ufshcd_disable_auto_bkops()
5862 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); in ufshcd_disable_auto_bkops()
5863 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_disable_auto_bkops()
5870 * @hba: per adapter instance
5877 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) in ufshcd_force_reset_auto_bkops() argument
5879 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { in ufshcd_force_reset_auto_bkops()
5880 hba->auto_bkops_enabled = false; in ufshcd_force_reset_auto_bkops()
5881 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5882 ufshcd_enable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
5884 hba->auto_bkops_enabled = true; in ufshcd_force_reset_auto_bkops()
5885 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5886 ufshcd_disable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
5888 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; in ufshcd_force_reset_auto_bkops()
5889 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_force_reset_auto_bkops()
5892 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_bkops_status() argument
5894 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_bkops_status()
5900 * @hba: per-adapter instance
5904 * bkops_status is greater than or equal to the "hba->urgent_bkops_lvl",
5909 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5913 static int ufshcd_bkops_ctrl(struct ufs_hba *hba) in ufshcd_bkops_ctrl() argument
5915 enum bkops_status status = hba->urgent_bkops_lvl; in ufshcd_bkops_ctrl()
5919 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_ctrl()
5921 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_ctrl()
5925 dev_err(hba->dev, "%s: invalid BKOPS status %d\n", in ufshcd_bkops_ctrl()
5932 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_ctrl()
5934 err = ufshcd_disable_auto_bkops(hba); in ufshcd_bkops_ctrl()
5939 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_ee_status() argument
5941 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ee_status()
5945 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) in ufshcd_bkops_exception_event_handler() argument
5950 if (hba->is_urgent_bkops_lvl_checked) in ufshcd_bkops_exception_event_handler()
5953 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_exception_event_handler()
5955 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_exception_event_handler()
5967 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", in ufshcd_bkops_exception_event_handler()
5970 hba->urgent_bkops_lvl = curr_status; in ufshcd_bkops_exception_event_handler()
5971 hba->is_urgent_bkops_lvl_checked = true; in ufshcd_bkops_exception_event_handler()
5975 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_exception_event_handler()
5978 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", in ufshcd_bkops_exception_event_handler()
5982 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) in __ufshcd_wb_toggle() argument
5988 index = ufshcd_wb_get_query_index(hba); in __ufshcd_wb_toggle()
5989 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL); in __ufshcd_wb_toggle()
5992 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable) in ufshcd_wb_toggle() argument
5996 if (!ufshcd_is_wb_allowed(hba) || in ufshcd_wb_toggle()
5997 hba->dev_info.wb_enabled == enable) in ufshcd_wb_toggle()
6000 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN); in ufshcd_wb_toggle()
6002 dev_err(hba->dev, "%s: Write Booster %s failed %d\n", in ufshcd_wb_toggle()
6007 hba->dev_info.wb_enabled = enable; in ufshcd_wb_toggle()
6008 dev_dbg(hba->dev, "%s: Write Booster %s\n", in ufshcd_wb_toggle()
6014 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba, in ufshcd_wb_toggle_buf_flush_during_h8() argument
6019 ret = __ufshcd_wb_toggle(hba, enable, in ufshcd_wb_toggle_buf_flush_during_h8()
6022 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n", in ufshcd_wb_toggle_buf_flush_during_h8()
6026 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n", in ufshcd_wb_toggle_buf_flush_during_h8()
6030 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable) in ufshcd_wb_toggle_buf_flush() argument
6034 if (!ufshcd_is_wb_allowed(hba) || in ufshcd_wb_toggle_buf_flush()
6035 hba->dev_info.wb_buf_flush_enabled == enable) in ufshcd_wb_toggle_buf_flush()
6038 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN); in ufshcd_wb_toggle_buf_flush()
6040 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n", in ufshcd_wb_toggle_buf_flush()
6045 hba->dev_info.wb_buf_flush_enabled = enable; in ufshcd_wb_toggle_buf_flush()
6046 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n", in ufshcd_wb_toggle_buf_flush()
6052 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, in ufshcd_wb_presrv_usrspc_keep_vcc_on() argument
6059 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_presrv_usrspc_keep_vcc_on()
6060 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_wb_presrv_usrspc_keep_vcc_on()
6064 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
6070 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
6075 return avail_buf < hba->vps->wb_flush_threshold; in ufshcd_wb_presrv_usrspc_keep_vcc_on()
6078 static void ufshcd_wb_force_disable(struct ufs_hba *hba) in ufshcd_wb_force_disable() argument
6080 if (ufshcd_is_wb_buf_flush_allowed(hba)) in ufshcd_wb_force_disable()
6081 ufshcd_wb_toggle_buf_flush(hba, false); in ufshcd_wb_force_disable()
6083 ufshcd_wb_toggle_buf_flush_during_h8(hba, false); in ufshcd_wb_force_disable()
6084 ufshcd_wb_toggle(hba, false); in ufshcd_wb_force_disable()
6085 hba->caps &= ~UFSHCD_CAP_WB_EN; in ufshcd_wb_force_disable()
6087 dev_info(hba->dev, "%s: WB force disabled\n", __func__); in ufshcd_wb_force_disable()
6090 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba) in ufshcd_is_wb_buf_lifetime_available() argument
6096 index = ufshcd_wb_get_query_index(hba); in ufshcd_is_wb_buf_lifetime_available()
6097 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_is_wb_buf_lifetime_available()
6101 dev_err(hba->dev, in ufshcd_is_wb_buf_lifetime_available()
6108 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n", in ufshcd_is_wb_buf_lifetime_available()
6113 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n", in ufshcd_is_wb_buf_lifetime_available()
6119 static bool ufshcd_wb_need_flush(struct ufs_hba *hba) in ufshcd_wb_need_flush() argument
6125 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_need_flush()
6128 if (!ufshcd_is_wb_buf_lifetime_available(hba)) { in ufshcd_wb_need_flush()
6129 ufshcd_wb_force_disable(hba); in ufshcd_wb_need_flush()
6144 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_need_flush()
6145 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_wb_need_flush()
6149 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_need_flush()
6154 if (!hba->dev_info.b_presrv_uspc_en) in ufshcd_wb_need_flush()
6157 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); in ufshcd_wb_need_flush()
6162 struct ufs_hba *hba = container_of(to_delayed_work(work), in ufshcd_rpm_dev_flush_recheck_work() local
6171 ufshcd_rpm_get_sync(hba); in ufshcd_rpm_dev_flush_recheck_work()
6172 ufshcd_rpm_put_sync(hba); in ufshcd_rpm_dev_flush_recheck_work()
6184 struct ufs_hba *hba; in ufshcd_exception_event_handler() local
6187 hba = container_of(work, struct ufs_hba, eeh_work); in ufshcd_exception_event_handler()
6189 err = ufshcd_get_ee_status(hba, &status); in ufshcd_exception_event_handler()
6191 dev_err(hba->dev, "%s: failed to get exception status %d\n", in ufshcd_exception_event_handler()
6196 trace_ufshcd_exception_event(dev_name(hba->dev), status); in ufshcd_exception_event_handler()
6198 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS) in ufshcd_exception_event_handler()
6199 ufshcd_bkops_exception_event_handler(hba); in ufshcd_exception_event_handler()
6201 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP) in ufshcd_exception_event_handler()
6202 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP); in ufshcd_exception_event_handler()
6204 ufs_debugfs_exception_event(hba, status); in ufshcd_exception_event_handler()
6208 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl) in ufshcd_complete_requests() argument
6210 if (hba->mcq_enabled) in ufshcd_complete_requests()
6211 ufshcd_mcq_compl_pending_transfer(hba, force_compl); in ufshcd_complete_requests()
6213 ufshcd_transfer_req_compl(hba); in ufshcd_complete_requests()
6215 ufshcd_tmc_handler(hba); in ufshcd_complete_requests()
6221 * @hba: per-adapter instance
6225 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) in ufshcd_quirk_dl_nac_errors() argument
6230 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6235 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) in ufshcd_quirk_dl_nac_errors()
6238 if ((hba->saved_err & DEVICE_FATAL_ERROR) || in ufshcd_quirk_dl_nac_errors()
6239 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
6240 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) in ufshcd_quirk_dl_nac_errors()
6243 if ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
6244 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { in ufshcd_quirk_dl_nac_errors()
6249 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6251 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6257 if ((hba->saved_err & INT_FATAL_ERRORS) || in ufshcd_quirk_dl_nac_errors()
6258 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
6259 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) in ufshcd_quirk_dl_nac_errors()
6269 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6270 err = ufshcd_verify_dev_init(hba); in ufshcd_quirk_dl_nac_errors()
6271 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6277 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) in ufshcd_quirk_dl_nac_errors()
6278 hba->saved_err &= ~UIC_ERROR; in ufshcd_quirk_dl_nac_errors()
6280 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; in ufshcd_quirk_dl_nac_errors()
6281 if (!hba->saved_uic_err) in ufshcd_quirk_dl_nac_errors()
6285 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6290 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) in ufshcd_is_saved_err_fatal() argument
6292 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || in ufshcd_is_saved_err_fatal()
6293 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); in ufshcd_is_saved_err_fatal()
6296 void ufshcd_schedule_eh_work(struct ufs_hba *hba) in ufshcd_schedule_eh_work() argument
6298 lockdep_assert_held(hba->host->host_lock); in ufshcd_schedule_eh_work()
6301 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { in ufshcd_schedule_eh_work()
6302 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_schedule_eh_work()
6303 ufshcd_is_saved_err_fatal(hba)) in ufshcd_schedule_eh_work()
6304 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; in ufshcd_schedule_eh_work()
6306 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; in ufshcd_schedule_eh_work()
6307 queue_work(hba->eh_wq, &hba->eh_work); in ufshcd_schedule_eh_work()
6311 static void ufshcd_force_error_recovery(struct ufs_hba *hba) in ufshcd_force_error_recovery() argument
6313 spin_lock_irq(hba->host->host_lock); in ufshcd_force_error_recovery()
6314 hba->force_reset = true; in ufshcd_force_error_recovery()
6315 ufshcd_schedule_eh_work(hba); in ufshcd_force_error_recovery()
6316 spin_unlock_irq(hba->host->host_lock); in ufshcd_force_error_recovery()
6319 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) in ufshcd_clk_scaling_allow() argument
6321 mutex_lock(&hba->wb_mutex); in ufshcd_clk_scaling_allow()
6322 down_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
6323 hba->clk_scaling.is_allowed = allow; in ufshcd_clk_scaling_allow()
6324 up_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
6325 mutex_unlock(&hba->wb_mutex); in ufshcd_clk_scaling_allow()
6328 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) in ufshcd_clk_scaling_suspend() argument
6331 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
6332 ufshcd_suspend_clkscaling(hba); in ufshcd_clk_scaling_suspend()
6333 ufshcd_clk_scaling_allow(hba, false); in ufshcd_clk_scaling_suspend()
6335 ufshcd_clk_scaling_allow(hba, true); in ufshcd_clk_scaling_suspend()
6336 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
6337 ufshcd_resume_clkscaling(hba); in ufshcd_clk_scaling_suspend()
6341 static void ufshcd_err_handling_prepare(struct ufs_hba *hba) in ufshcd_err_handling_prepare() argument
6343 ufshcd_rpm_get_sync(hba); in ufshcd_err_handling_prepare()
6344 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) || in ufshcd_err_handling_prepare()
6345 hba->is_sys_suspended) { in ufshcd_err_handling_prepare()
6353 ufshcd_setup_hba_vreg(hba, true); in ufshcd_err_handling_prepare()
6354 ufshcd_enable_irq(hba); in ufshcd_err_handling_prepare()
6355 ufshcd_setup_vreg(hba, true); in ufshcd_err_handling_prepare()
6356 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_err_handling_prepare()
6357 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_err_handling_prepare()
6358 ufshcd_hold(hba); in ufshcd_err_handling_prepare()
6359 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_err_handling_prepare()
6360 ufshcd_setup_clocks(hba, true); in ufshcd_err_handling_prepare()
6361 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; in ufshcd_err_handling_prepare()
6362 ufshcd_vops_resume(hba, pm_op); in ufshcd_err_handling_prepare()
6364 ufshcd_hold(hba); in ufshcd_err_handling_prepare()
6365 if (ufshcd_is_clkscaling_supported(hba) && in ufshcd_err_handling_prepare()
6366 hba->clk_scaling.is_enabled) in ufshcd_err_handling_prepare()
6367 ufshcd_suspend_clkscaling(hba); in ufshcd_err_handling_prepare()
6368 ufshcd_clk_scaling_allow(hba, false); in ufshcd_err_handling_prepare()
6371 blk_mq_quiesce_tagset(&hba->host->tag_set); in ufshcd_err_handling_prepare()
6372 cancel_work_sync(&hba->eeh_work); in ufshcd_err_handling_prepare()
6375 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) in ufshcd_err_handling_unprepare() argument
6377 blk_mq_unquiesce_tagset(&hba->host->tag_set); in ufshcd_err_handling_unprepare()
6378 ufshcd_release(hba); in ufshcd_err_handling_unprepare()
6379 if (ufshcd_is_clkscaling_supported(hba)) in ufshcd_err_handling_unprepare()
6380 ufshcd_clk_scaling_suspend(hba, false); in ufshcd_err_handling_unprepare()
6381 ufshcd_rpm_put(hba); in ufshcd_err_handling_unprepare()
6384 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) in ufshcd_err_handling_should_stop() argument
6386 return (!hba->is_powered || hba->shutting_down || in ufshcd_err_handling_should_stop()
6387 !hba->ufs_device_wlun || in ufshcd_err_handling_should_stop()
6388 hba->ufshcd_state == UFSHCD_STATE_ERROR || in ufshcd_err_handling_should_stop()
6389 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || in ufshcd_err_handling_should_stop()
6390 ufshcd_is_link_broken(hba)))); in ufshcd_err_handling_should_stop()
6394 static void ufshcd_recover_pm_error(struct ufs_hba *hba) in ufshcd_recover_pm_error() argument
6396 struct Scsi_Host *shost = hba->host; in ufshcd_recover_pm_error()
6401 hba->is_sys_suspended = false; in ufshcd_recover_pm_error()
6406 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); in ufshcd_recover_pm_error()
6408 /* hba device might have a runtime error otherwise */ in ufshcd_recover_pm_error()
6410 ret = pm_runtime_set_active(hba->dev); in ufshcd_recover_pm_error()
6427 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba) in ufshcd_recover_pm_error() argument
6432 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) in ufshcd_is_pwr_mode_restore_needed() argument
6434 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; in ufshcd_is_pwr_mode_restore_needed()
6437 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); in ufshcd_is_pwr_mode_restore_needed()
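Only the DME read of PA_PWRMODE is listed for ufshcd_is_pwr_mode_restore_needed(); the comparison against the cached power settings is not shown here. A minimal sketch of what such a check could look like, assuming the rx/tx power modes are packed into PA_PWRMODE as two nibbles — the mask and offsets below are illustrative, not the driver's actual macros:

static bool example_pwr_mode_mismatch(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
	u32 mode = 0;

	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);

	/* Illustrative unpacking only: tx mode in the low nibble, rx mode
	 * in the next nibble; the real layout comes from the UniPro
	 * definitions. */
	return ((mode & 0xf) != pwr_info->pwr_tx) ||
	       (((mode >> 4) & 0xf) != pwr_info->pwr_rx);
}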
6455 struct ufs_hba *hba = shost_priv(shost); in ufshcd_abort_one() local
6457 *ret = ufshcd_try_to_abort_task(hba, tag); in ufshcd_abort_one()
6458 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, in ufshcd_abort_one()
6459 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1, in ufshcd_abort_one()
6467 * @hba: Host bus adapter pointer.
6471 static bool ufshcd_abort_all(struct ufs_hba *hba) in ufshcd_abort_all() argument
6475 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret); in ufshcd_abort_all()
6480 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { in ufshcd_abort_all()
6481 ret = ufshcd_clear_tm_cmd(hba, tag); in ufshcd_abort_all()
6488 ufshcd_complete_requests(hba, false); in ufshcd_abort_all()
6500 struct ufs_hba *hba; in ufshcd_err_handler() local
6506 hba = container_of(work, struct ufs_hba, eh_work); in ufshcd_err_handler()
6508 dev_info(hba->dev, in ufshcd_err_handler()
6509 …"%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force… in ufshcd_err_handler()
6510 __func__, ufshcd_state_name[hba->ufshcd_state], in ufshcd_err_handler()
6511 hba->is_powered, hba->shutting_down, hba->saved_err, in ufshcd_err_handler()
6512 hba->saved_uic_err, hba->force_reset, in ufshcd_err_handler()
6513 ufshcd_is_link_broken(hba) ? "; link is broken" : ""); in ufshcd_err_handler()
6515 down(&hba->host_sem); in ufshcd_err_handler()
6516 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6517 if (ufshcd_err_handling_should_stop(hba)) { in ufshcd_err_handler()
6518 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6519 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6520 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6521 up(&hba->host_sem); in ufshcd_err_handler()
6524 ufshcd_set_eh_in_progress(hba); in ufshcd_err_handler()
6525 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6526 ufshcd_err_handling_prepare(hba); in ufshcd_err_handler()
6528 ufshcd_complete_requests(hba, false); in ufshcd_err_handler()
6529 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6534 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6535 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_err_handler()
6540 if (ufshcd_err_handling_should_stop(hba)) in ufshcd_err_handler()
6543 if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) && in ufshcd_err_handler()
6544 !hba->force_reset) { in ufshcd_err_handler()
6547 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6549 ret = ufshcd_quirk_dl_nac_errors(hba); in ufshcd_err_handler()
6550 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6551 if (!ret && ufshcd_err_handling_should_stop(hba)) in ufshcd_err_handler()
6555 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || in ufshcd_err_handler()
6556 (hba->saved_uic_err && in ufshcd_err_handler()
6557 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_err_handler()
6558 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR); in ufshcd_err_handler()
6560 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6561 ufshcd_print_host_state(hba); in ufshcd_err_handler()
6562 ufshcd_print_pwr_info(hba); in ufshcd_err_handler()
6563 ufshcd_print_evt_hist(hba); in ufshcd_err_handler()
6564 ufshcd_print_tmrs(hba, hba->outstanding_tasks); in ufshcd_err_handler()
6565 ufshcd_print_trs_all(hba, pr_prdt); in ufshcd_err_handler()
6566 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6574 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_err_handler()
6575 ufshcd_is_saved_err_fatal(hba) || in ufshcd_err_handler()
6576 ((hba->saved_err & UIC_ERROR) && in ufshcd_err_handler()
6577 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | in ufshcd_err_handler()
6587 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) { in ufshcd_err_handler()
6588 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_err_handler()
6589 if (!hba->saved_uic_err) in ufshcd_err_handler()
6590 hba->saved_err &= ~UIC_ERROR; in ufshcd_err_handler()
6591 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6592 if (ufshcd_is_pwr_mode_restore_needed(hba)) in ufshcd_err_handler()
6594 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6595 if (!hba->saved_err && !needs_restore) in ufshcd_err_handler()
6599 hba->silence_err_logs = true; in ufshcd_err_handler()
6601 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6603 needs_reset = ufshcd_abort_all(hba); in ufshcd_err_handler()
6605 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6606 hba->silence_err_logs = false; in ufshcd_err_handler()
6615 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6620 down_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6621 hba->force_pmc = true; in ufshcd_err_handler()
6622 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); in ufshcd_err_handler()
6625 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", in ufshcd_err_handler()
6628 hba->force_pmc = false; in ufshcd_err_handler()
6629 ufshcd_print_pwr_info(hba); in ufshcd_err_handler()
6630 up_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6631 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6639 hba->force_reset = false; in ufshcd_err_handler()
6640 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6641 err = ufshcd_reset_and_restore(hba); in ufshcd_err_handler()
6643 dev_err(hba->dev, "%s: reset and restore failed with err %d\n", in ufshcd_err_handler()
6646 ufshcd_recover_pm_error(hba); in ufshcd_err_handler()
6647 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6652 if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_err_handler()
6653 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6654 if (hba->saved_err || hba->saved_uic_err) in ufshcd_err_handler()
6655 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", in ufshcd_err_handler()
6656 __func__, hba->saved_err, hba->saved_uic_err); in ufshcd_err_handler()
6659 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && in ufshcd_err_handler()
6660 hba->ufshcd_state != UFSHCD_STATE_ERROR) { in ufshcd_err_handler()
6663 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_err_handler()
6665 ufshcd_clear_eh_in_progress(hba); in ufshcd_err_handler()
6666 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6667 ufshcd_err_handling_unprepare(hba); in ufshcd_err_handler()
6668 up(&hba->host_sem); in ufshcd_err_handler()
6670 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__, in ufshcd_err_handler()
6671 ufshcd_state_name[hba->ufshcd_state]); in ufshcd_err_handler()
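The error handler above is long; the following comment block is a reader's summary of its flow as it appears in this listing, not additional driver code:

/*
 * ufshcd_err_handler() flow, as listed above:
 *  1. Take host_sem and bail out early if ufshcd_err_handling_should_stop().
 *  2. ufshcd_err_handling_prepare(): runtime-resume the device, hold the
 *     host, pause clock scaling and quiesce the SCSI tag set.
 *  3. Complete already-finished requests, dump host state on fatal or UIC
 *     errors, then abort outstanding requests and task management commands
 *     via ufshcd_abort_all().
 *  4. If only the power mode needs restoring, rerun ufshcd_config_pwr_mode()
 *     with force_pmc set.
 *  5. On fatal errors, a broken link or force_reset, run
 *     ufshcd_reset_and_restore() and recover PM state.
 *  6. ufshcd_err_handling_unprepare(): unquiesce, release, resume clock
 *     scaling, then drop host_sem.
 */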
6676 * @hba: per-adapter instance
6682 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) in ufshcd_update_uic_error() argument
6688 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); in ufshcd_update_uic_error()
6691 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg); in ufshcd_update_uic_error()
6697 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", in ufshcd_update_uic_error()
6704 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6705 if (hba->uic_async_done && hba->active_uic_cmd) in ufshcd_update_uic_error()
6706 cmd = hba->active_uic_cmd; in ufshcd_update_uic_error()
6712 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6718 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); in ufshcd_update_uic_error()
6721 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg); in ufshcd_update_uic_error()
6724 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; in ufshcd_update_uic_error()
6725 else if (hba->dev_quirks & in ufshcd_update_uic_error()
6728 hba->uic_error |= in ufshcd_update_uic_error()
6731 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; in ufshcd_update_uic_error()
6737 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); in ufshcd_update_uic_error()
6740 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg); in ufshcd_update_uic_error()
6741 hba->uic_error |= UFSHCD_UIC_NL_ERROR; in ufshcd_update_uic_error()
6745 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); in ufshcd_update_uic_error()
6748 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg); in ufshcd_update_uic_error()
6749 hba->uic_error |= UFSHCD_UIC_TL_ERROR; in ufshcd_update_uic_error()
6753 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); in ufshcd_update_uic_error()
6756 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg); in ufshcd_update_uic_error()
6757 hba->uic_error |= UFSHCD_UIC_DME_ERROR; in ufshcd_update_uic_error()
6761 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", in ufshcd_update_uic_error()
6762 __func__, hba->uic_error); in ufshcd_update_uic_error()
6768 * @hba: per-adapter instance
6775 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) in ufshcd_check_errors() argument
6780 spin_lock(hba->host->host_lock); in ufshcd_check_errors()
6781 hba->errors |= UFSHCD_ERROR_MASK & intr_status; in ufshcd_check_errors()
6783 if (hba->errors & INT_FATAL_ERRORS) { in ufshcd_check_errors()
6784 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, in ufshcd_check_errors()
6785 hba->errors); in ufshcd_check_errors()
6789 if (hba->errors & UIC_ERROR) { in ufshcd_check_errors()
6790 hba->uic_error = 0; in ufshcd_check_errors()
6791 retval = ufshcd_update_uic_error(hba); in ufshcd_check_errors()
6792 if (hba->uic_error) in ufshcd_check_errors()
6796 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) { in ufshcd_check_errors()
6797 dev_err(hba->dev, in ufshcd_check_errors()
6799 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? in ufshcd_check_errors()
6801 hba->errors, ufshcd_get_upmcrs(hba)); in ufshcd_check_errors()
6802 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR, in ufshcd_check_errors()
6803 hba->errors); in ufshcd_check_errors()
6804 ufshcd_set_link_broken(hba); in ufshcd_check_errors()
6813 hba->saved_err |= hba->errors; in ufshcd_check_errors()
6814 hba->saved_uic_err |= hba->uic_error; in ufshcd_check_errors()
6817 if ((hba->saved_err & in ufshcd_check_errors()
6819 (hba->saved_uic_err && in ufshcd_check_errors()
6820 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_check_errors()
6821 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", in ufshcd_check_errors()
6822 __func__, hba->saved_err, in ufshcd_check_errors()
6823 hba->saved_uic_err); in ufshcd_check_errors()
6824 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, in ufshcd_check_errors()
6826 ufshcd_print_pwr_info(hba); in ufshcd_check_errors()
6828 ufshcd_schedule_eh_work(hba); in ufshcd_check_errors()
6837 hba->errors = 0; in ufshcd_check_errors()
6838 hba->uic_error = 0; in ufshcd_check_errors()
6839 spin_unlock(hba->host->host_lock); in ufshcd_check_errors()
6845 * @hba: per adapter instance
6851 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) in ufshcd_tmc_handler() argument
6857 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_tmc_handler()
6858 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_tmc_handler()
6859 issued = hba->outstanding_tasks & ~pending; in ufshcd_tmc_handler()
6860 for_each_set_bit(tag, &issued, hba->nutmrs) { in ufshcd_tmc_handler()
6861 struct request *req = hba->tmf_rqs[tag]; in ufshcd_tmc_handler()
6867 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_tmc_handler()
6874 * @hba: per adapter instance
6878 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba) in ufshcd_handle_mcq_cq_events() argument
6886 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs); in ufshcd_handle_mcq_cq_events()
6888 outstanding_cqs = (1U << hba->nr_hw_queues) - 1; in ufshcd_handle_mcq_cq_events()
6891 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; in ufshcd_handle_mcq_cq_events()
6893 hwq = &hba->uhq[i]; in ufshcd_handle_mcq_cq_events()
6895 events = ufshcd_mcq_read_cqis(hba, i); in ufshcd_handle_mcq_cq_events()
6897 ufshcd_mcq_write_cqis(hba, events, i); in ufshcd_handle_mcq_cq_events()
6900 ufshcd_mcq_poll_cqe_lock(hba, hwq); in ufshcd_handle_mcq_cq_events()
6908 * @hba: per adapter instance
6915 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) in ufshcd_sl_intr() argument
6920 retval |= ufshcd_uic_cmd_compl(hba, intr_status); in ufshcd_sl_intr()
6922 if (intr_status & UFSHCD_ERROR_MASK || hba->errors) in ufshcd_sl_intr()
6923 retval |= ufshcd_check_errors(hba, intr_status); in ufshcd_sl_intr()
6926 retval |= ufshcd_tmc_handler(hba); in ufshcd_sl_intr()
6929 retval |= ufshcd_transfer_req_compl(hba); in ufshcd_sl_intr()
6932 retval |= ufshcd_handle_mcq_cq_events(hba); in ufshcd_sl_intr()
6950 struct ufs_hba *hba = __hba; in ufshcd_intr() local
6951 int retries = hba->nutrs; in ufshcd_intr()
6953 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
6954 hba->ufs_stats.last_intr_status = intr_status; in ufshcd_intr()
6955 hba->ufs_stats.last_intr_ts = local_clock(); in ufshcd_intr()
6958 * There could be max of hba->nutrs reqs in flight and in worst case in ufshcd_intr()
6965 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_intr()
6966 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); in ufshcd_intr()
6968 retval |= ufshcd_sl_intr(hba, enabled_intr_status); in ufshcd_intr()
6970 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
6975 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) { in ufshcd_intr()
6976 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", in ufshcd_intr()
6979 hba->ufs_stats.last_intr_status, in ufshcd_intr()
6981 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); in ufshcd_intr()
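ufshcd_intr() above is the top-level interrupt service routine; its registration is outside this listing. A minimal sketch of how a shared handler with this signature is typically requested — the helper name and the "ufs-example" device name are placeholders, not the driver's actual init code:

static int example_request_ufs_irq(struct ufs_hba *hba)
{
	/* ufshcd_intr() matches irq_handler_t: irqreturn_t (*)(int, void *) */
	return devm_request_irq(hba->dev, hba->irq, ufshcd_intr,
				IRQF_SHARED, "ufs-example", hba);
}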
6987 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_tm_cmd() argument
6992 if (!test_bit(tag, &hba->outstanding_tasks)) in ufshcd_clear_tm_cmd()
6995 ufshcd_utmrl_clear(hba, tag); in ufshcd_clear_tm_cmd()
6998 err = ufshcd_wait_for_register(hba, in ufshcd_clear_tm_cmd()
7002 dev_err(hba->dev, "Clearing task management function with tag %d %s\n", in ufshcd_clear_tm_cmd()
7009 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, in __ufshcd_issue_tm_cmd() argument
7012 struct request_queue *q = hba->tmf_queue; in __ufshcd_issue_tm_cmd()
7013 struct Scsi_Host *host = hba->host; in __ufshcd_issue_tm_cmd()
7027 ufshcd_hold(hba); in __ufshcd_issue_tm_cmd()
7032 hba->tmf_rqs[req->tag] = req; in __ufshcd_issue_tm_cmd()
7035 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
7036 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); in __ufshcd_issue_tm_cmd()
7038 __set_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
7043 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); in __ufshcd_issue_tm_cmd()
7045 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); in __ufshcd_issue_tm_cmd()
7051 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); in __ufshcd_issue_tm_cmd()
7052 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", in __ufshcd_issue_tm_cmd()
7054 if (ufshcd_clear_tm_cmd(hba, task_tag)) in __ufshcd_issue_tm_cmd()
7055 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", in __ufshcd_issue_tm_cmd()
7060 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
7062 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); in __ufshcd_issue_tm_cmd()
7065 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
7066 hba->tmf_rqs[req->tag] = NULL; in __ufshcd_issue_tm_cmd()
7067 __clear_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
7068 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
7070 ufshcd_release(hba); in __ufshcd_issue_tm_cmd()
7078 * @hba: per adapter instance
7086 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, in ufshcd_issue_tm_cmd() argument
7109 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); in ufshcd_issue_tm_cmd()
7115 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", in ufshcd_issue_tm_cmd()
7125 * @hba: per-adapter instance
7138 * the caller is expected to hold the hba->dev_cmd.lock mutex.
7142 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, in ufshcd_issue_devman_upiu_cmd() argument
7149 const u32 tag = hba->reserved_slot; in ufshcd_issue_devman_upiu_cmd()
7150 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_issue_devman_upiu_cmd()
7154 /* Protects use of hba->reserved_slot. */ in ufshcd_issue_devman_upiu_cmd()
7155 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_issue_devman_upiu_cmd()
7157 ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag); in ufshcd_issue_devman_upiu_cmd()
7159 ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0); in ufshcd_issue_devman_upiu_cmd()
7182 ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT); in ufshcd_issue_devman_upiu_cmd()
7195 dev_warn(hba->dev, in ufshcd_issue_devman_upiu_cmd()
7202 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, in ufshcd_issue_devman_upiu_cmd()
7210 * @hba: per-adapter instance
7225 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, in ufshcd_exec_raw_upiu_cmd() argument
7243 ufshcd_dev_man_lock(hba); in ufshcd_exec_raw_upiu_cmd()
7244 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, in ufshcd_exec_raw_upiu_cmd()
7247 ufshcd_dev_man_unlock(hba); in ufshcd_exec_raw_upiu_cmd()
7256 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f); in ufshcd_exec_raw_upiu_cmd()
7262 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, in ufshcd_exec_raw_upiu_cmd()
7281 * @hba: per adapter instance
7292 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, in ufshcd_advanced_rpmb_req_handler() argument
7297 const u32 tag = hba->reserved_slot; in ufshcd_advanced_rpmb_req_handler()
7298 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_advanced_rpmb_req_handler()
7304 int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0; in ufshcd_advanced_rpmb_req_handler()
7306 /* Protects use of hba->reserved_slot. */ in ufshcd_advanced_rpmb_req_handler()
7307 ufshcd_dev_man_lock(hba); in ufshcd_advanced_rpmb_req_handler()
7309 ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag); in ufshcd_advanced_rpmb_req_handler()
7311 ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs); in ufshcd_advanced_rpmb_req_handler()
7322 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list); in ufshcd_advanced_rpmb_req_handler()
7326 err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT); in ufshcd_advanced_rpmb_req_handler()
7351 ufshcd_dev_man_unlock(hba); in ufshcd_advanced_rpmb_req_handler()
7366 struct ufs_hba *hba; in ufshcd_eh_device_reset_handler() local
7374 hba = shost_priv(host); in ufshcd_eh_device_reset_handler()
7377 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); in ufshcd_eh_device_reset_handler()
7384 if (hba->mcq_enabled) { in ufshcd_eh_device_reset_handler()
7385 for (pos = 0; pos < hba->nutrs; pos++) { in ufshcd_eh_device_reset_handler()
7386 lrbp = &hba->lrb[pos]; in ufshcd_eh_device_reset_handler()
7389 ufshcd_clear_cmd(hba, pos); in ufshcd_eh_device_reset_handler()
7390 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); in ufshcd_eh_device_reset_handler()
7391 ufshcd_mcq_poll_cqe_lock(hba, hwq); in ufshcd_eh_device_reset_handler()
7399 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_eh_device_reset_handler()
7400 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) in ufshcd_eh_device_reset_handler()
7401 if (hba->lrb[pos].lun == lun) in ufshcd_eh_device_reset_handler()
7403 hba->outstanding_reqs &= ~pending_reqs; in ufshcd_eh_device_reset_handler()
7404 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_eh_device_reset_handler()
7406 for_each_set_bit(pos, &pending_reqs, hba->nutrs) { in ufshcd_eh_device_reset_handler()
7407 if (ufshcd_clear_cmd(hba, pos) < 0) { in ufshcd_eh_device_reset_handler()
7408 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_eh_device_reset_handler()
7410 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_eh_device_reset_handler()
7411 hba->outstanding_reqs |= not_cleared; in ufshcd_eh_device_reset_handler()
7413 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_eh_device_reset_handler()
7415 dev_err(hba->dev, "%s: failed to clear request %d\n", in ufshcd_eh_device_reset_handler()
7419 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask); in ufshcd_eh_device_reset_handler()
7422 hba->req_abort_count = 0; in ufshcd_eh_device_reset_handler()
7423 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); in ufshcd_eh_device_reset_handler()
7427 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_eh_device_reset_handler()
7433 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) in ufshcd_set_req_abort_skip() argument
7438 for_each_set_bit(tag, &bitmap, hba->nutrs) { in ufshcd_set_req_abort_skip()
7439 lrbp = &hba->lrb[tag]; in ufshcd_set_req_abort_skip()
7446 * @hba: Pointer to adapter instance
7457 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) in ufshcd_try_to_abort_task() argument
7459 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_try_to_abort_task()
7465 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
7469 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", in ufshcd_try_to_abort_task()
7478 hba->dev, in ufshcd_try_to_abort_task()
7482 dev_info(hba->dev, in ufshcd_try_to_abort_task()
7489 dev_err(hba->dev, in ufshcd_try_to_abort_task()
7499 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
7504 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", in ufshcd_try_to_abort_task()
7510 err = ufshcd_clear_cmd(hba, tag); in ufshcd_try_to_abort_task()
7512 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", in ufshcd_try_to_abort_task()
7527 struct ufs_hba *hba = shost_priv(host); in ufshcd_abort() local
7529 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_abort()
7535 ufshcd_hold(hba); in ufshcd_abort()
7537 if (!hba->mcq_enabled) { in ufshcd_abort()
7538 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_abort()
7539 if (!test_bit(tag, &hba->outstanding_reqs)) { in ufshcd_abort()
7541 dev_err(hba->dev, in ufshcd_abort()
7543 __func__, tag, hba->outstanding_reqs, reg); in ufshcd_abort()
7549 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); in ufshcd_abort()
7559 if (!hba->req_abort_count) { in ufshcd_abort()
7560 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); in ufshcd_abort()
7561 ufshcd_print_evt_hist(hba); in ufshcd_abort()
7562 ufshcd_print_host_state(hba); in ufshcd_abort()
7563 ufshcd_print_pwr_info(hba); in ufshcd_abort()
7564 ufshcd_print_tr(hba, tag, true); in ufshcd_abort()
7566 ufshcd_print_tr(hba, tag, false); in ufshcd_abort()
7568 hba->req_abort_count++; in ufshcd_abort()
7570 if (!hba->mcq_enabled && !(reg & (1 << tag))) { in ufshcd_abort()
7572 dev_err(hba->dev, in ufshcd_abort()
7575 __ufshcd_transfer_req_compl(hba, 1UL << tag); in ufshcd_abort()
7588 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); in ufshcd_abort()
7591 hba->force_reset = true; in ufshcd_abort()
7592 ufshcd_schedule_eh_work(hba); in ufshcd_abort()
7597 if (hba->mcq_enabled) { in ufshcd_abort()
7605 dev_err(hba->dev, "%s: skipping abort\n", __func__); in ufshcd_abort()
7606 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7610 err = ufshcd_try_to_abort_task(hba, tag); in ufshcd_abort()
7612 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_abort()
7613 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7622 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_abort()
7623 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); in ufshcd_abort()
7624 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_abort()
7627 ufshcd_release_scsi_cmd(hba, lrbp); in ufshcd_abort()
7633 ufshcd_release(hba); in ufshcd_abort()
7639 * @hba: UFS host controller instance.
7643 static void ufshcd_process_probe_result(struct ufs_hba *hba, in ufshcd_process_probe_result() argument
7648 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_process_probe_result()
7650 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_process_probe_result()
7651 else if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_process_probe_result()
7652 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_process_probe_result()
7653 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_process_probe_result()
7655 trace_ufshcd_init(dev_name(hba->dev), ret, in ufshcd_process_probe_result()
7657 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_process_probe_result()
7662 * @hba: per-adapter instance
7670 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) in ufshcd_host_reset_and_restore() argument
7678 ufshcd_hba_stop(hba); in ufshcd_host_reset_and_restore()
7679 hba->silence_err_logs = true; in ufshcd_host_reset_and_restore()
7680 ufshcd_complete_requests(hba, true); in ufshcd_host_reset_and_restore()
7681 hba->silence_err_logs = false; in ufshcd_host_reset_and_restore()
7684 ufshcd_scale_clks(hba, ULONG_MAX, true); in ufshcd_host_reset_and_restore()
7686 err = ufshcd_hba_enable(hba); in ufshcd_host_reset_and_restore()
7692 err = ufshcd_device_init(hba, /*init_dev_params=*/false); in ufshcd_host_reset_and_restore()
7694 err = ufshcd_probe_hba(hba, false); in ufshcd_host_reset_and_restore()
7695 ufshcd_process_probe_result(hba, probe_start, err); in ufshcd_host_reset_and_restore()
7699 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); in ufshcd_host_reset_and_restore()
7700 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); in ufshcd_host_reset_and_restore()
7706 * @hba: per-adapter instance
7713 static int ufshcd_reset_and_restore(struct ufs_hba *hba) in ufshcd_reset_and_restore() argument
7721 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7727 saved_err |= hba->saved_err; in ufshcd_reset_and_restore()
7728 saved_uic_err |= hba->saved_uic_err; in ufshcd_reset_and_restore()
7729 hba->saved_err = 0; in ufshcd_reset_and_restore()
7730 hba->saved_uic_err = 0; in ufshcd_reset_and_restore()
7731 hba->force_reset = false; in ufshcd_reset_and_restore()
7732 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_reset_and_restore()
7733 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7736 ufshcd_device_reset(hba); in ufshcd_reset_and_restore()
7738 err = ufshcd_host_reset_and_restore(hba); in ufshcd_reset_and_restore()
7740 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7744 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && in ufshcd_reset_and_restore()
7745 hba->ufshcd_state != UFSHCD_STATE_ERROR && in ufshcd_reset_and_restore()
7746 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL) in ufshcd_reset_and_restore()
7754 scsi_report_bus_reset(hba->host, 0); in ufshcd_reset_and_restore()
7756 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_reset_and_restore()
7757 hba->saved_err |= saved_err; in ufshcd_reset_and_restore()
7758 hba->saved_uic_err |= saved_uic_err; in ufshcd_reset_and_restore()
7760 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7775 struct ufs_hba *hba; in ufshcd_eh_host_reset_handler() local
7777 hba = shost_priv(cmd->device->host); in ufshcd_eh_host_reset_handler()
7781 * stuck in this function waiting for flush_work(&hba->eh_work). And in ufshcd_eh_host_reset_handler()
7785 if (hba->pm_op_in_progress) { in ufshcd_eh_host_reset_handler()
7786 if (ufshcd_link_recovery(hba)) in ufshcd_eh_host_reset_handler()
7792 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7793 hba->force_reset = true; in ufshcd_eh_host_reset_handler()
7794 ufshcd_schedule_eh_work(hba); in ufshcd_eh_host_reset_handler()
7795 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); in ufshcd_eh_host_reset_handler()
7796 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7798 flush_work(&hba->eh_work); in ufshcd_eh_host_reset_handler()
7800 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7801 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) in ufshcd_eh_host_reset_handler()
7803 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7857 * @hba: per-adapter instance
7862 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, in ufshcd_find_max_sup_active_icc_level() argument
7867 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || in ufshcd_find_max_sup_active_icc_level()
7868 !hba->vreg_info.vccq2) { in ufshcd_find_max_sup_active_icc_level()
7875 dev_dbg(hba->dev, in ufshcd_find_max_sup_active_icc_level()
7881 if (hba->vreg_info.vcc->max_uA) in ufshcd_find_max_sup_active_icc_level()
7883 hba->vreg_info.vcc->max_uA, in ufshcd_find_max_sup_active_icc_level()
7887 if (hba->vreg_info.vccq->max_uA) in ufshcd_find_max_sup_active_icc_level()
7889 hba->vreg_info.vccq->max_uA, in ufshcd_find_max_sup_active_icc_level()
7893 if (hba->vreg_info.vccq2->max_uA) in ufshcd_find_max_sup_active_icc_level()
7895 hba->vreg_info.vccq2->max_uA, in ufshcd_find_max_sup_active_icc_level()
7902 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) in ufshcd_set_active_icc_lvl() argument
7912 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, in ufshcd_set_active_icc_lvl()
7915 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7921 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf); in ufshcd_set_active_icc_lvl()
7922 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); in ufshcd_set_active_icc_lvl()
7924 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_set_active_icc_lvl()
7928 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7950 * @hba: per-adapter instance
7974 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) in ufshcd_scsi_add_wlus() argument
7979 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7981 if (IS_ERR(hba->ufs_device_wlun)) { in ufshcd_scsi_add_wlus()
7982 ret = PTR_ERR(hba->ufs_device_wlun); in ufshcd_scsi_add_wlus()
7983 hba->ufs_device_wlun = NULL; in ufshcd_scsi_add_wlus()
7986 scsi_device_put(hba->ufs_device_wlun); in ufshcd_scsi_add_wlus()
7988 sdev_rpmb = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7997 sdev_boot = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
8000 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); in ufshcd_scsi_add_wlus()
8008 scsi_remove_device(hba->ufs_device_wlun); in ufshcd_scsi_add_wlus()
8013 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) in ufshcd_wb_probe() argument
8015 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_wb_probe()
8020 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_probe()
8030 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) in ufshcd_wb_probe()
8056 ufshcd_read_unit_desc_param(hba, in ufshcd_wb_probe()
8071 if (!ufshcd_is_wb_buf_lifetime_available(hba)) in ufshcd_wb_probe()
8077 hba->caps &= ~UFSHCD_CAP_WB_EN; in ufshcd_wb_probe()
8080 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) in ufshcd_temp_notif_probe() argument
8082 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_temp_notif_probe()
8086 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300) in ufshcd_temp_notif_probe()
8098 ufshcd_enable_ee(hba, mask); in ufshcd_temp_notif_probe()
8099 ufs_hwmon_probe(hba, mask); in ufshcd_temp_notif_probe()
8103 static void ufshcd_set_rtt(struct ufs_hba *hba) in ufshcd_set_rtt() argument
8105 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_set_rtt()
8108 int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ? in ufshcd_set_rtt()
8109 hba->vops->max_num_rtt : hba->nortt; in ufshcd_set_rtt()
8115 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_set_rtt()
8117 dev_err(hba->dev, "failed reading bMaxNumOfRTT\n"); in ufshcd_set_rtt()
8130 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_set_rtt()
8132 dev_err(hba->dev, "failed writing bMaxNumOfRTT\n"); in ufshcd_set_rtt()
8135 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, in ufshcd_fixup_dev_quirks() argument
8139 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_fixup_dev_quirks()
8150 hba->dev_quirks |= f->quirk; in ufshcd_fixup_dev_quirks()
8155 static void ufs_fixup_device_setup(struct ufs_hba *hba) in ufs_fixup_device_setup() argument
8158 ufshcd_fixup_dev_quirks(hba, ufs_fixups); in ufs_fixup_device_setup()
8161 ufshcd_vops_fixup_dev_quirks(hba); in ufs_fixup_device_setup()
8164 static void ufshcd_update_rtc(struct ufs_hba *hba) in ufshcd_update_rtc() argument
8172 if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) { in ufshcd_update_rtc()
8173 dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__); in ufshcd_update_rtc()
8181 val = ts64.tv_sec - hba->dev_info.rtc_time_baseline; in ufshcd_update_rtc()
8184 if (ufshcd_rpm_get_if_active(hba) <= 0) in ufshcd_update_rtc()
8187 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED, in ufshcd_update_rtc()
8189 ufshcd_rpm_put(hba); in ufshcd_update_rtc()
8192 dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err); in ufshcd_update_rtc()
8193 else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE) in ufshcd_update_rtc()
8194 hba->dev_info.rtc_time_baseline = ts64.tv_sec; in ufshcd_update_rtc()
8199 struct ufs_hba *hba; in ufshcd_rtc_work() local
8201 hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work); in ufshcd_rtc_work()
8204 if (!ufshcd_is_ufs_dev_busy(hba) && in ufshcd_rtc_work()
8205 hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL && in ufshcd_rtc_work()
8206 !hba->clk_gating.active_reqs) in ufshcd_rtc_work()
8207 ufshcd_update_rtc(hba); in ufshcd_rtc_work()
8209 if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period) in ufshcd_rtc_work()
8210 schedule_delayed_work(&hba->ufs_rtc_update_work, in ufshcd_rtc_work()
8211 msecs_to_jiffies(hba->dev_info.rtc_update_period)); in ufshcd_rtc_work()
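ufshcd_rtc_work() re-arms itself above, but its initial setup is not part of this listing. A minimal sketch of the usual delayed-work initialization, assuming the work item is armed once the RTC update period is known (the helper name is illustrative):

static void example_init_rtc_work(struct ufs_hba *hba)
{
	INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
	schedule_delayed_work(&hba->ufs_rtc_update_work,
			      msecs_to_jiffies(hba->dev_info.rtc_update_period));
}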
8214 static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf) in ufs_init_rtc() argument
8217 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_init_rtc()
8242 static int ufs_get_device_desc(struct ufs_hba *hba) in ufs_get_device_desc() argument
8247 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_get_device_desc()
8255 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, in ufs_get_device_desc()
8258 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", in ufs_get_device_desc()
8279 err = ufshcd_read_string_desc(hba, model_index, in ufs_get_device_desc()
8282 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", in ufs_get_device_desc()
8287 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] + in ufs_get_device_desc()
8290 ufs_fixup_device_setup(hba); in ufs_get_device_desc()
8292 ufshcd_wb_probe(hba, desc_buf); in ufs_get_device_desc()
8294 ufshcd_temp_notif_probe(hba, desc_buf); in ufs_get_device_desc()
8296 ufs_init_rtc(hba, desc_buf); in ufs_get_device_desc()
8309 static void ufs_put_device_desc(struct ufs_hba *hba) in ufs_put_device_desc() argument
8311 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_put_device_desc()
8320 * @hba: per-adapter instance
8328 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) in ufshcd_quirk_tune_host_pa_tactivate() argument
8336 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), in ufshcd_quirk_tune_host_pa_tactivate()
8341 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), in ufshcd_quirk_tune_host_pa_tactivate()
8348 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
8355 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
8360 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); in ufshcd_quirk_tune_host_pa_tactivate()
8364 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_quirk_tune_host_pa_tactivate()
8379 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_quirk_tune_host_pa_tactivate()
8387 static void ufshcd_tune_unipro_params(struct ufs_hba *hba) in ufshcd_tune_unipro_params() argument
8389 ufshcd_vops_apply_dev_quirks(hba); in ufshcd_tune_unipro_params()
8391 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) in ufshcd_tune_unipro_params()
8393 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); in ufshcd_tune_unipro_params()
8395 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) in ufshcd_tune_unipro_params()
8396 ufshcd_quirk_tune_host_pa_tactivate(hba); in ufshcd_tune_unipro_params()
8399 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) in ufshcd_clear_dbg_ufs_stats() argument
8401 hba->ufs_stats.hibern8_exit_cnt = 0; in ufshcd_clear_dbg_ufs_stats()
8402 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_clear_dbg_ufs_stats()
8403 hba->req_abort_count = 0; in ufshcd_clear_dbg_ufs_stats()
8406 static int ufshcd_device_geo_params_init(struct ufs_hba *hba) in ufshcd_device_geo_params_init() argument
8417 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, in ufshcd_device_geo_params_init()
8420 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n", in ufshcd_device_geo_params_init()
8426 hba->dev_info.max_lu_supported = 32; in ufshcd_device_geo_params_init()
8428 hba->dev_info.max_lu_supported = 8; in ufshcd_device_geo_params_init()
8460 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) in ufshcd_parse_dev_ref_clk_freq() argument
8466 hba->dev_ref_clk_freq = in ufshcd_parse_dev_ref_clk_freq()
8469 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) in ufshcd_parse_dev_ref_clk_freq()
8470 dev_err(hba->dev, in ufshcd_parse_dev_ref_clk_freq()
8474 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) in ufshcd_set_dev_ref_clk() argument
8478 u32 freq = hba->dev_ref_clk_freq; in ufshcd_set_dev_ref_clk()
8480 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_set_dev_ref_clk()
8484 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", in ufshcd_set_dev_ref_clk()
8492 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_set_dev_ref_clk()
8496 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", in ufshcd_set_dev_ref_clk()
8501 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", in ufshcd_set_dev_ref_clk()
8508 static int ufshcd_device_params_init(struct ufs_hba *hba) in ufshcd_device_params_init() argument
8514 ret = ufshcd_device_geo_params_init(hba); in ufshcd_device_params_init()
8519 ret = ufs_get_device_desc(hba); in ufshcd_device_params_init()
8521 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", in ufshcd_device_params_init()
8526 ufshcd_set_rtt(hba); in ufshcd_device_params_init()
8528 ufshcd_get_ref_clk_gating_wait(hba); in ufshcd_device_params_init()
8530 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_device_params_init()
8532 hba->dev_info.f_power_on_wp_en = flag; in ufshcd_device_params_init()
8535 if (ufshcd_get_max_pwr_mode(hba)) in ufshcd_device_params_init()
8536 dev_err(hba->dev, in ufshcd_device_params_init()
8543 static void ufshcd_set_timestamp_attr(struct ufs_hba *hba) in ufshcd_set_timestamp_attr() argument
8548 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_set_timestamp_attr()
8554 ufshcd_dev_man_lock(hba); in ufshcd_set_timestamp_attr()
8556 ufshcd_init_query(hba, &request, &response, in ufshcd_set_timestamp_attr()
8566 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_set_timestamp_attr()
8569 dev_err(hba->dev, "%s: failed to set timestamp %d\n", in ufshcd_set_timestamp_attr()
8572 ufshcd_dev_man_unlock(hba); in ufshcd_set_timestamp_attr()
8577 * @hba: per-adapter instance
8581 static int ufshcd_add_lus(struct ufs_hba *hba) in ufshcd_add_lus() argument
8586 ret = ufshcd_scsi_add_wlus(hba); in ufshcd_add_lus()
8591 if (ufshcd_is_clkscaling_supported(hba)) { in ufshcd_add_lus()
8592 memcpy(&hba->clk_scaling.saved_pwr_info, in ufshcd_add_lus()
8593 &hba->pwr_info, in ufshcd_add_lus()
8595 hba->clk_scaling.is_allowed = true; in ufshcd_add_lus()
8597 ret = ufshcd_devfreq_init(hba); in ufshcd_add_lus()
8601 hba->clk_scaling.is_enabled = true; in ufshcd_add_lus()
8602 ufshcd_init_clk_scaling_sysfs(hba); in ufshcd_add_lus()
8606 * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev in ufshcd_add_lus()
8610 schedule_delayed_work(&hba->ufs_rtc_update_work, in ufshcd_add_lus()
8613 ufs_bsg_probe(hba); in ufshcd_add_lus()
8614 scsi_scan_host(hba->host); in ufshcd_add_lus()
8621 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs) in ufshcd_release_sdb_queue() argument
8625 ucdl_size = ufshcd_get_ucd_size(hba) * nutrs; in ufshcd_release_sdb_queue()
8626 dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr, in ufshcd_release_sdb_queue()
8627 hba->ucdl_dma_addr); in ufshcd_release_sdb_queue()
8630 dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr, in ufshcd_release_sdb_queue()
8631 hba->utrdl_dma_addr); in ufshcd_release_sdb_queue()
8633 devm_kfree(hba->dev, hba->lrb); in ufshcd_release_sdb_queue()
8636 static int ufshcd_alloc_mcq(struct ufs_hba *hba) in ufshcd_alloc_mcq() argument
8639 int old_nutrs = hba->nutrs; in ufshcd_alloc_mcq()
8641 ret = ufshcd_mcq_decide_queue_depth(hba); in ufshcd_alloc_mcq()
8645 hba->nutrs = ret; in ufshcd_alloc_mcq()
8646 ret = ufshcd_mcq_init(hba); in ufshcd_alloc_mcq()
8654 if (hba->nutrs != old_nutrs) { in ufshcd_alloc_mcq()
8655 ufshcd_release_sdb_queue(hba, old_nutrs); in ufshcd_alloc_mcq()
8656 ret = ufshcd_memory_alloc(hba); in ufshcd_alloc_mcq()
8659 ufshcd_host_memory_configure(hba); in ufshcd_alloc_mcq()
8662 ret = ufshcd_mcq_memory_alloc(hba); in ufshcd_alloc_mcq()
8666 hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_alloc_mcq()
8667 hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_alloc_mcq()
8671 hba->nutrs = old_nutrs; in ufshcd_alloc_mcq()
8675 static void ufshcd_config_mcq(struct ufs_hba *hba) in ufshcd_config_mcq() argument
8680 ret = ufshcd_mcq_vops_config_esi(hba); in ufshcd_config_mcq()
8681 dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : ""); in ufshcd_config_mcq()
8684 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR) in ufshcd_config_mcq()
8686 ufshcd_enable_intr(hba, intrs); in ufshcd_config_mcq()
8687 ufshcd_mcq_make_queues_operational(hba); in ufshcd_config_mcq()
8688 ufshcd_mcq_config_mac(hba, hba->nutrs); in ufshcd_config_mcq()
8690 …dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, que… in ufshcd_config_mcq()
8691 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT], in ufshcd_config_mcq()
8692 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL], in ufshcd_config_mcq()
8693 hba->nutrs); in ufshcd_config_mcq()
8696 static int ufshcd_post_device_init(struct ufs_hba *hba) in ufshcd_post_device_init() argument
8700 ufshcd_tune_unipro_params(hba); in ufshcd_post_device_init()
8703 ufshcd_set_ufs_dev_active(hba); in ufshcd_post_device_init()
8704 ufshcd_force_reset_auto_bkops(hba); in ufshcd_post_device_init()
8706 ufshcd_set_timestamp_attr(hba); in ufshcd_post_device_init()
8708 if (!hba->max_pwr_info.is_valid) in ufshcd_post_device_init()
8715 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) in ufshcd_post_device_init()
8716 ufshcd_set_dev_ref_clk(hba); in ufshcd_post_device_init()
8718 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); in ufshcd_post_device_init()
8720 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", in ufshcd_post_device_init()
8728 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) in ufshcd_device_init() argument
8732 WARN_ON_ONCE(!hba->scsi_host_added); in ufshcd_device_init()
8734 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_device_init()
8736 ret = ufshcd_link_startup(hba); in ufshcd_device_init()
8740 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) in ufshcd_device_init()
8744 ufshcd_clear_dbg_ufs_stats(hba); in ufshcd_device_init()
8747 ufshcd_set_link_active(hba); in ufshcd_device_init()
8750 if (hba->mcq_enabled && !init_dev_params) { in ufshcd_device_init()
8751 ufshcd_config_mcq(hba); in ufshcd_device_init()
8752 ufshcd_mcq_enable(hba); in ufshcd_device_init()
8756 ret = ufshcd_verify_dev_init(hba); in ufshcd_device_init()
8761 ret = ufshcd_complete_dev_init(hba); in ufshcd_device_init()
8770 ret = ufshcd_device_params_init(hba); in ufshcd_device_init()
8773 if (is_mcq_supported(hba) && in ufshcd_device_init()
8774 hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { in ufshcd_device_init()
8775 ufshcd_config_mcq(hba); in ufshcd_device_init()
8776 ufshcd_mcq_enable(hba); in ufshcd_device_init()
8780 return ufshcd_post_device_init(hba); in ufshcd_device_init()
8784 * ufshcd_probe_hba - probe hba to detect device and initialize it
8785 * @hba: per-adapter instance
8792 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) in ufshcd_probe_hba() argument
8796 if (!hba->pm_op_in_progress && in ufshcd_probe_hba()
8797 (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) { in ufshcd_probe_hba()
8799 ufshcd_device_reset(hba); in ufshcd_probe_hba()
8800 ufs_put_device_desc(hba); in ufshcd_probe_hba()
8801 ufshcd_hba_stop(hba); in ufshcd_probe_hba()
8802 ret = ufshcd_hba_enable(hba); in ufshcd_probe_hba()
8804 dev_err(hba->dev, "Host controller enable failed\n"); in ufshcd_probe_hba()
8805 ufshcd_print_evt_hist(hba); in ufshcd_probe_hba()
8806 ufshcd_print_host_state(hba); in ufshcd_probe_hba()
8811 ret = ufshcd_device_init(hba, init_dev_params); in ufshcd_probe_hba()
8816 ufshcd_print_pwr_info(hba); in ufshcd_probe_hba()
8824 ufshcd_set_active_icc_lvl(hba); in ufshcd_probe_hba()
8827 ufshcd_configure_wb(hba); in ufshcd_probe_hba()
8829 if (hba->ee_usr_mask) in ufshcd_probe_hba()
8830 ufshcd_write_ee_control(hba); in ufshcd_probe_hba()
8831 ufshcd_configure_auto_hibern8(hba); in ufshcd_probe_hba()
8837 * ufshcd_async_scan - asynchronous execution for probing hba
8843 struct ufs_hba *hba = (struct ufs_hba *)data; in ufshcd_async_scan() local
8847 down(&hba->host_sem); in ufshcd_async_scan()
8848 /* Initialize hba, detect and initialize UFS device */ in ufshcd_async_scan()
8850 ret = ufshcd_probe_hba(hba, true); in ufshcd_async_scan()
8851 ufshcd_process_probe_result(hba, probe_start, ret); in ufshcd_async_scan()
8852 up(&hba->host_sem); in ufshcd_async_scan()
8857 ret = ufshcd_add_lus(hba); in ufshcd_async_scan()
8860 pm_runtime_put_sync(hba->dev); in ufshcd_async_scan()
8863 dev_err(hba->dev, "%s failed: %d\n", __func__, ret); in ufshcd_async_scan()
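ufshcd_async_scan() takes a void pointer and an async cookie, so it is meant to run off the async infrastructure; the call site is not shown in this listing. A minimal sketch of the usual pattern — the helper below is illustrative, and the real scheduling happens in the driver's init path:

#include <linux/async.h>

static void example_kick_off_scan(struct ufs_hba *hba)
{
	/* Defer the potentially slow UFS device probe so the controller
	 * probe routine can return quickly. */
	async_schedule(ufshcd_async_scan, hba);
}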
8868 struct ufs_hba *hba = shost_priv(scmd->device->host); in ufshcd_eh_timed_out() local
8870 if (!hba->system_suspending) { in ufshcd_eh_timed_out()
8881 ufshcd_link_recovery(hba); in ufshcd_eh_timed_out()
8882 dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n", in ufshcd_eh_timed_out()
8883 __func__, hba->outstanding_tasks); in ufshcd_eh_timed_out()
8885 return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; in ufshcd_eh_timed_out()
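The abort, device-reset, host-reset and timeout handlers in this listing are invoked by the SCSI midlayer through the host template. A minimal sketch of that wiring, using the standard struct scsi_host_template member names; the rest of the template (queuecommand, queue depth handling, and so on) is omitted here:

static const struct scsi_host_template example_ufs_sht = {
	.name			 = "ufshcd-example",
	/* Error-handling entry points shown in this listing */
	.eh_abort_handler	 = ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler	 = ufshcd_eh_host_reset_handler,
	.eh_timed_out		 = ufshcd_eh_timed_out,
};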
8955 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, in ufshcd_config_vreg_lpm() argument
8958 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); in ufshcd_config_vreg_lpm()
8961 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, in ufshcd_config_vreg_hpm() argument
8967 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); in ufshcd_config_vreg_hpm()
9020 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_vreg() argument
9023 struct device *dev = hba->dev; in ufshcd_setup_vreg()
9024 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_vreg()
9045 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_hba_vreg() argument
9047 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_hba_vreg()
9049 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); in ufshcd_setup_hba_vreg()
9070 static int ufshcd_init_vreg(struct ufs_hba *hba) in ufshcd_init_vreg() argument
9073 struct device *dev = hba->dev; in ufshcd_init_vreg()
9074 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_vreg()
9087 static int ufshcd_init_hba_vreg(struct ufs_hba *hba) in ufshcd_init_hba_vreg() argument
9089 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_hba_vreg()
9091 return ufshcd_get_vreg(hba->dev, info->vdd_hba); in ufshcd_init_hba_vreg()
9094 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) in ufshcd_setup_clocks() argument
9098 struct list_head *head = &hba->clk_list_head; in ufshcd_setup_clocks()
9105 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); in ufshcd_setup_clocks()
9115 if (ufshcd_is_link_active(hba) && in ufshcd_setup_clocks()
9123 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", in ufshcd_setup_clocks()
9131 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, in ufshcd_setup_clocks()
9136 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); in ufshcd_setup_clocks()
9140 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_setup_clocks()
9141 ufshcd_pm_qos_update(hba, on); in ufshcd_setup_clocks()
9148 } else if (!ret && on && hba->clk_gating.is_initialized) { in ufshcd_setup_clocks()
9149 scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) in ufshcd_setup_clocks()
9150 hba->clk_gating.state = CLKS_ON; in ufshcd_setup_clocks()
9151 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
9152 hba->clk_gating.state); in ufshcd_setup_clocks()
9156 trace_ufshcd_profile_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
9162 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba) in ufshcd_parse_ref_clk_property() argument
9165 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq); in ufshcd_parse_ref_clk_property()
9168 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret); in ufshcd_parse_ref_clk_property()
9175 static int ufshcd_init_clocks(struct ufs_hba *hba) in ufshcd_init_clocks() argument
9179 struct device *dev = hba->dev; in ufshcd_init_clocks()
9180 struct list_head *head = &hba->clk_list_head; in ufshcd_init_clocks()
9203 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); in ufshcd_init_clocks()
9208 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_init_clocks()
9220 if (hba->use_pm_opp) { in ufshcd_init_clocks()
9221 ret = ufshcd_opp_set_rate(hba, ULONG_MAX); in ufshcd_init_clocks()
9223 dev_err(hba->dev, "%s: failed to set OPP: %d", __func__, in ufshcd_init_clocks()
9233 static int ufshcd_variant_hba_init(struct ufs_hba *hba) in ufshcd_variant_hba_init() argument
9237 if (!hba->vops) in ufshcd_variant_hba_init()
9240 err = ufshcd_vops_init(hba); in ufshcd_variant_hba_init()
9242 dev_err_probe(hba->dev, err, in ufshcd_variant_hba_init()
9244 __func__, ufshcd_get_var_name(hba), err); in ufshcd_variant_hba_init()
9249 static void ufshcd_variant_hba_exit(struct ufs_hba *hba) in ufshcd_variant_hba_exit() argument
9251 if (!hba->vops) in ufshcd_variant_hba_exit()
9254 ufshcd_vops_exit(hba); in ufshcd_variant_hba_exit()
9257 static int ufshcd_hba_init(struct ufs_hba *hba) in ufshcd_hba_init() argument
9268 err = ufshcd_init_hba_vreg(hba); in ufshcd_hba_init()
9272 err = ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_init()
9276 err = ufshcd_init_clocks(hba); in ufshcd_hba_init()
9280 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) in ufshcd_hba_init()
9281 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba); in ufshcd_hba_init()
9283 err = ufshcd_setup_clocks(hba, true); in ufshcd_hba_init()
9287 err = ufshcd_init_vreg(hba); in ufshcd_hba_init()
9291 err = ufshcd_setup_vreg(hba, true); in ufshcd_hba_init()
9295 err = ufshcd_variant_hba_init(hba); in ufshcd_hba_init()
9299 ufs_debugfs_hba_init(hba); in ufshcd_hba_init()
9300 ufs_fault_inject_hba_init(hba); in ufshcd_hba_init()
9302 hba->is_powered = true; in ufshcd_hba_init()
9306 ufshcd_setup_vreg(hba, false); in ufshcd_hba_init()
9308 ufshcd_setup_clocks(hba, false); in ufshcd_hba_init()
9310 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_init()
9315 static void ufshcd_hba_exit(struct ufs_hba *hba) in ufshcd_hba_exit() argument
9317 if (hba->is_powered) { in ufshcd_hba_exit()
9318 ufshcd_pm_qos_exit(hba); in ufshcd_hba_exit()
9319 ufshcd_exit_clk_scaling(hba); in ufshcd_hba_exit()
9320 ufshcd_exit_clk_gating(hba); in ufshcd_hba_exit()
9321 if (hba->eh_wq) in ufshcd_hba_exit()
9322 destroy_workqueue(hba->eh_wq); in ufshcd_hba_exit()
9323 ufs_debugfs_hba_exit(hba); in ufshcd_hba_exit()
9324 ufshcd_variant_hba_exit(hba); in ufshcd_hba_exit()
9325 ufshcd_setup_vreg(hba, false); in ufshcd_hba_exit()
9326 ufshcd_setup_clocks(hba, false); in ufshcd_hba_exit()
9327 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_exit()
9328 hba->is_powered = false; in ufshcd_hba_exit()
9329 ufs_put_device_desc(hba); in ufshcd_hba_exit()
9362 * @hba: per adapter instance
9368 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, in ufshcd_set_dev_pwr_mode() argument
9376 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
9377 sdp = hba->ufs_device_wlun; in ufshcd_set_dev_pwr_mode()
9382 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
9393 hba->host->eh_noresume = 1; in ufshcd_set_dev_pwr_mode()
9411 hba->curr_dev_pwr_mode = pwr_mode; in ufshcd_set_dev_pwr_mode()
9415 hba->host->eh_noresume = 0; in ufshcd_set_dev_pwr_mode()
9419 static int ufshcd_link_state_transition(struct ufs_hba *hba, in ufshcd_link_state_transition() argument
9425 if (req_link_state == hba->uic_link_state) in ufshcd_link_state_transition()
9429 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
9431 ufshcd_set_link_hibern8(hba); in ufshcd_link_state_transition()
9433 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
9444 (!check_for_bkops || !hba->auto_bkops_enabled)) { in ufshcd_link_state_transition()
9455 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
9457 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
9465 ufshcd_hba_stop(hba); in ufshcd_link_state_transition()
9470 ufshcd_set_link_off(hba); in ufshcd_link_state_transition()
9477 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_vreg_set_lpm() argument
9487 if (!ufshcd_is_link_active(hba) && in ufshcd_vreg_set_lpm()
9488 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) in ufshcd_vreg_set_lpm()
9506 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_lpm()
9507 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_lpm()
9508 ufshcd_setup_vreg(hba, false); in ufshcd_vreg_set_lpm()
9510 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_lpm()
9511 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_lpm()
9513 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) { in ufshcd_vreg_set_lpm()
9514 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_lpm()
9515 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_lpm()
9522 if (vcc_off && hba->vreg_info.vcc && in ufshcd_vreg_set_lpm()
9523 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) in ufshcd_vreg_set_lpm()
9528 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_vreg_set_hpm() argument
9532 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_hpm()
9533 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_hpm()
9534 ret = ufshcd_setup_vreg(hba, true); in ufshcd_vreg_set_hpm()
9535 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_hpm()
9536 if (!ufshcd_is_link_active(hba)) { in ufshcd_vreg_set_hpm()
9537 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
9540 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_hpm()
9544 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); in ufshcd_vreg_set_hpm()
9549 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
9551 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_hpm()
9557 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_lpm() argument
9559 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) in ufshcd_hba_vreg_set_lpm()
9560 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_vreg_set_lpm()
9563 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_hpm() argument
9565 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) in ufshcd_hba_vreg_set_hpm()
9566 ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_vreg_set_hpm()
9569 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) in __ufshcd_wl_suspend() argument
9577 hba->pm_op_in_progress = true; in __ufshcd_wl_suspend()
9580 hba->rpm_lvl : hba->spm_lvl; in __ufshcd_wl_suspend()
9592 ufshcd_hold(hba); in __ufshcd_wl_suspend()
9593 hba->clk_gating.is_suspended = true; in __ufshcd_wl_suspend()
9595 if (ufshcd_is_clkscaling_supported(hba)) in __ufshcd_wl_suspend()
9596 ufshcd_clk_scaling_suspend(hba, true); in __ufshcd_wl_suspend()
9603 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && in __ufshcd_wl_suspend()
9604 (req_link_state == hba->uic_link_state)) in __ufshcd_wl_suspend()
9608 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { in __ufshcd_wl_suspend()
9610 if (!ufshcd_eh_in_progress(hba)) in __ufshcd_wl_suspend()
9611 ufshcd_force_error_recovery(hba); in __ufshcd_wl_suspend()
9617 if (ufshcd_can_autobkops_during_suspend(hba)) { in __ufshcd_wl_suspend()
9623 ret = ufshcd_bkops_ctrl(hba); in __ufshcd_wl_suspend()
9630 ufshcd_force_error_recovery(hba); in __ufshcd_wl_suspend()
9636 ufshcd_disable_auto_bkops(hba); in __ufshcd_wl_suspend()
9643 hba->dev_info.b_rpm_dev_flush_capable = in __ufshcd_wl_suspend()
9644 hba->auto_bkops_enabled || in __ufshcd_wl_suspend()
9647 ufshcd_is_auto_hibern8_enabled(hba))) && in __ufshcd_wl_suspend()
9648 ufshcd_wb_need_flush(hba)); in __ufshcd_wl_suspend()
9651 flush_work(&hba->eeh_work); in __ufshcd_wl_suspend()
9653 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); in __ufshcd_wl_suspend()
9657 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { in __ufshcd_wl_suspend()
9660 ufshcd_disable_auto_bkops(hba); in __ufshcd_wl_suspend()
9662 if (!hba->dev_info.b_rpm_dev_flush_capable) { in __ufshcd_wl_suspend()
9663 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); in __ufshcd_wl_suspend()
9670 ufshcd_force_error_recovery(hba); in __ufshcd_wl_suspend()
9682 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba); in __ufshcd_wl_suspend()
9683 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops); in __ufshcd_wl_suspend()
9690 ufshcd_force_error_recovery(hba); in __ufshcd_wl_suspend()
9702 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); in __ufshcd_wl_suspend()
9706 cancel_delayed_work_sync(&hba->ufs_rtc_update_work); in __ufshcd_wl_suspend()
9715 if (ufshcd_is_ufs_dev_deepsleep(hba)) { in __ufshcd_wl_suspend()
9716 ufshcd_device_reset(hba); in __ufshcd_wl_suspend()
9717 WARN_ON(!ufshcd_is_link_off(hba)); in __ufshcd_wl_suspend()
9719 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) in __ufshcd_wl_suspend()
9720 ufshcd_set_link_active(hba); in __ufshcd_wl_suspend()
9721 else if (ufshcd_is_link_off(hba)) in __ufshcd_wl_suspend()
9722 ufshcd_host_reset_and_restore(hba); in __ufshcd_wl_suspend()
9725 if (ufshcd_is_ufs_dev_deepsleep(hba)) { in __ufshcd_wl_suspend()
9726 ufshcd_device_reset(hba); in __ufshcd_wl_suspend()
9727 ufshcd_host_reset_and_restore(hba); in __ufshcd_wl_suspend()
9729 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) in __ufshcd_wl_suspend()
9730 ufshcd_disable_auto_bkops(hba); in __ufshcd_wl_suspend()
9732 if (ufshcd_is_clkscaling_supported(hba)) in __ufshcd_wl_suspend()
9733 ufshcd_clk_scaling_suspend(hba, false); in __ufshcd_wl_suspend()
9735 hba->dev_info.b_rpm_dev_flush_capable = false; in __ufshcd_wl_suspend()
9737 if (hba->dev_info.b_rpm_dev_flush_capable) { in __ufshcd_wl_suspend()
9738 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, in __ufshcd_wl_suspend()
9743 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); in __ufshcd_wl_suspend()
9744 hba->clk_gating.is_suspended = false; in __ufshcd_wl_suspend()
9745 ufshcd_release(hba); in __ufshcd_wl_suspend()
9747 hba->pm_op_in_progress = false; in __ufshcd_wl_suspend()
9752 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in __ufshcd_wl_resume() argument
9755 enum uic_link_state old_link_state = hba->uic_link_state; in __ufshcd_wl_resume()
9757 hba->pm_op_in_progress = true; in __ufshcd_wl_resume()
9764 ret = ufshcd_vops_resume(hba, pm_op); in __ufshcd_wl_resume()
9769 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba)); in __ufshcd_wl_resume()
9771 if (ufshcd_is_link_hibern8(hba)) { in __ufshcd_wl_resume()
9772 ret = ufshcd_uic_hibern8_exit(hba); in __ufshcd_wl_resume()
9774 ufshcd_set_link_active(hba); in __ufshcd_wl_resume()
9776 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in __ufshcd_wl_resume()
9780 } else if (ufshcd_is_link_off(hba)) { in __ufshcd_wl_resume()
9787 ret = ufshcd_reset_and_restore(hba); in __ufshcd_wl_resume()
9792 if (ret || !ufshcd_is_link_active(hba)) in __ufshcd_wl_resume()
9796 if (!ufshcd_is_ufs_dev_active(hba)) { in __ufshcd_wl_resume()
9797 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); in __ufshcd_wl_resume()
9800 ufshcd_set_timestamp_attr(hba); in __ufshcd_wl_resume()
9801 schedule_delayed_work(&hba->ufs_rtc_update_work, in __ufshcd_wl_resume()
9805 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) in __ufshcd_wl_resume()
9806 ufshcd_enable_auto_bkops(hba); in __ufshcd_wl_resume()
9812 ufshcd_bkops_ctrl(hba); in __ufshcd_wl_resume()
9814 if (hba->ee_usr_mask) in __ufshcd_wl_resume()
9815 ufshcd_write_ee_control(hba); in __ufshcd_wl_resume()
9817 if (ufshcd_is_clkscaling_supported(hba)) in __ufshcd_wl_resume()
9818 ufshcd_clk_scaling_suspend(hba, false); in __ufshcd_wl_resume()
9820 if (hba->dev_info.b_rpm_dev_flush_capable) { in __ufshcd_wl_resume()
9821 hba->dev_info.b_rpm_dev_flush_capable = false; in __ufshcd_wl_resume()
9822 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); in __ufshcd_wl_resume()
9825 ufshcd_configure_auto_hibern8(hba); in __ufshcd_wl_resume()
9830 ufshcd_link_state_transition(hba, old_link_state, 0); in __ufshcd_wl_resume()
9832 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); in __ufshcd_wl_resume()
9833 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); in __ufshcd_wl_resume()
9836 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); in __ufshcd_wl_resume()
9837 hba->clk_gating.is_suspended = false; in __ufshcd_wl_resume()
9838 ufshcd_release(hba); in __ufshcd_wl_resume()
9839 hba->pm_op_in_progress = false; in __ufshcd_wl_resume()
9846 struct ufs_hba *hba; in ufshcd_wl_runtime_suspend() local
9850 hba = shost_priv(sdev->host); in ufshcd_wl_runtime_suspend()
9852 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM); in ufshcd_wl_runtime_suspend()
9858 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_runtime_suspend()
9866 struct ufs_hba *hba; in ufshcd_wl_runtime_resume() local
9870 hba = shost_priv(sdev->host); in ufshcd_wl_runtime_resume()
9872 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM); in ufshcd_wl_runtime_resume()
9878 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_runtime_resume()
9888 struct ufs_hba *hba; in ufshcd_wl_suspend() local
9892 hba = shost_priv(sdev->host); in ufshcd_wl_suspend()
9893 down(&hba->host_sem); in ufshcd_wl_suspend()
9894 hba->system_suspending = true; in ufshcd_wl_suspend()
9899 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM); in ufshcd_wl_suspend()
9902 up(&hba->host_sem); in ufshcd_wl_suspend()
9907 hba->is_sys_suspended = true; in ufshcd_wl_suspend()
9910 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_suspend()
9918 struct ufs_hba *hba; in ufshcd_wl_resume() local
9922 hba = shost_priv(sdev->host); in ufshcd_wl_resume()
9927 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM); in ufshcd_wl_resume()
9933 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_resume()
9935 hba->is_sys_suspended = false; in ufshcd_wl_resume()
9936 hba->system_suspending = false; in ufshcd_wl_resume()
9937 up(&hba->host_sem); in ufshcd_wl_resume()
9944 * @hba: per adapter instance
9947 * and set vreg and hba-vreg in lpm mode.
9951 static int ufshcd_suspend(struct ufs_hba *hba) in ufshcd_suspend() argument
9955 if (!hba->is_powered) in ufshcd_suspend()
9961 ufshcd_disable_irq(hba); in ufshcd_suspend()
9962 ret = ufshcd_setup_clocks(hba, false); in ufshcd_suspend()
9964 ufshcd_enable_irq(hba); in ufshcd_suspend()
9967 if (ufshcd_is_clkgating_allowed(hba)) { in ufshcd_suspend()
9968 hba->clk_gating.state = CLKS_OFF; in ufshcd_suspend()
9969 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_suspend()
9970 hba->clk_gating.state); in ufshcd_suspend()
9973 ufshcd_vreg_set_lpm(hba); in ufshcd_suspend()
9975 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_suspend()
9976 ufshcd_pm_qos_update(hba, false); in ufshcd_suspend()
9983 * @hba: per adapter instance
9986 * irqs of the hba.
9990 static int ufshcd_resume(struct ufs_hba *hba) in ufshcd_resume() argument
9994 if (!hba->is_powered) in ufshcd_resume()
9997 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_resume()
9998 ret = ufshcd_vreg_set_hpm(hba); in ufshcd_resume()
10003 ret = ufshcd_setup_clocks(hba, true); in ufshcd_resume()
10008 ufshcd_enable_irq(hba); in ufshcd_resume()
10013 ufshcd_vreg_set_lpm(hba); in ufshcd_resume()
10016 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret); in ufshcd_resume()
10033 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_system_suspend() local
10037 if (pm_runtime_suspended(hba->dev)) in ufshcd_system_suspend()
10040 ret = ufshcd_suspend(hba); in ufshcd_system_suspend()
10042 trace_ufshcd_system_suspend(dev_name(hba->dev), ret, in ufshcd_system_suspend()
10044 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_suspend()
10060 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_system_resume() local
10064 if (pm_runtime_suspended(hba->dev)) in ufshcd_system_resume()
10067 ret = ufshcd_resume(hba); in ufshcd_system_resume()
10070 trace_ufshcd_system_resume(dev_name(hba->dev), ret, in ufshcd_system_resume()
10072 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_resume()
10090 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_runtime_suspend() local
10094 ret = ufshcd_suspend(hba); in ufshcd_runtime_suspend()
10096 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, in ufshcd_runtime_suspend()
10098 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_suspend()
10117 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_runtime_resume() local
10121 ret = ufshcd_resume(hba); in ufshcd_runtime_resume()
10123 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, in ufshcd_runtime_resume()
10125 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_resume()
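Editor's note: ufshcd_system_suspend()/ufshcd_system_resume() and ufshcd_runtime_suspend()/ufshcd_runtime_resume() are the HBA-level PM entry points; glue drivers normally reference them from their dev_pm_ops rather than calling them directly. A sketch of that wiring (the ops-struct name is illustrative):

	static const struct dev_pm_ops example_ufs_hba_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
		SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	};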
10134 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_wl_shutdown() local
10136 down(&hba->host_sem); in ufshcd_wl_shutdown()
10137 hba->shutting_down = true; in ufshcd_wl_shutdown()
10138 up(&hba->host_sem); in ufshcd_wl_shutdown()
10141 ufshcd_rpm_get_sync(hba); in ufshcd_wl_shutdown()
10143 shost_for_each_device(sdev, hba->host) { in ufshcd_wl_shutdown()
10144 if (sdev == hba->ufs_device_wlun) in ufshcd_wl_shutdown()
10150 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); in ufshcd_wl_shutdown()
10156 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) in ufshcd_wl_shutdown()
10157 ufshcd_suspend(hba); in ufshcd_wl_shutdown()
10159 hba->is_powered = false; in ufshcd_wl_shutdown()
10165 * @hba: per adapter instance
10167 void ufshcd_remove(struct ufs_hba *hba) in ufshcd_remove() argument
10169 if (hba->ufs_device_wlun) in ufshcd_remove()
10170 ufshcd_rpm_get_sync(hba); in ufshcd_remove()
10171 ufs_hwmon_remove(hba); in ufshcd_remove()
10172 ufs_bsg_remove(hba); in ufshcd_remove()
10173 ufs_sysfs_remove_nodes(hba->dev); in ufshcd_remove()
10174 cancel_delayed_work_sync(&hba->ufs_rtc_update_work); in ufshcd_remove()
10175 blk_mq_destroy_queue(hba->tmf_queue); in ufshcd_remove()
10176 blk_put_queue(hba->tmf_queue); in ufshcd_remove()
10177 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_remove()
10178 if (hba->scsi_host_added) in ufshcd_remove()
10179 scsi_remove_host(hba->host); in ufshcd_remove()
10181 ufshcd_disable_intr(hba, hba->intr_mask); in ufshcd_remove()
10182 ufshcd_hba_stop(hba); in ufshcd_remove()
10183 ufshcd_hba_exit(hba); in ufshcd_remove()
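Editor's note: ufshcd_remove() is the counterpart that glue drivers call from their remove path; it unregisters the SCSI host, frees the TMF queue and tag set, stops the controller and powers the HBA down via ufshcd_hba_exit(). A sketch of a platform glue driver's remove callback built around it (names illustrative; a real driver may need additional vendor teardown and runtime-PM bookkeeping):

	static void example_ufs_pltfrm_remove(struct platform_device *pdev)
	{
		struct ufs_hba *hba = platform_get_drvdata(pdev);

		pm_runtime_get_sync(&pdev->dev);	/* make sure the HBA is powered */
		ufshcd_remove(hba);
		pm_runtime_disable(&pdev->dev);
	}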
10199 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_system_restore() local
10207 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), in ufshcd_system_restore()
10209 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), in ufshcd_system_restore()
10211 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), in ufshcd_system_restore()
10213 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), in ufshcd_system_restore()
10220 ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H); in ufshcd_system_restore()
10237 * @hba: per adapter instance
10241 static int ufshcd_set_dma_mask(struct ufs_hba *hba) in ufshcd_set_dma_mask() argument
10243 if (hba->vops && hba->vops->set_dma_mask) in ufshcd_set_dma_mask()
10244 return hba->vops->set_dma_mask(hba); in ufshcd_set_dma_mask()
10245 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { in ufshcd_set_dma_mask()
10246 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) in ufshcd_set_dma_mask()
10249 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); in ufshcd_set_dma_mask()
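Editor's note: ufshcd_set_dma_mask() prefers a vendor override and otherwise picks a 64-bit mask when the controller advertises 64-bit addressing, falling back to 32-bit. A hypothetical vops->set_dma_mask override for a controller whose DMA path is narrower than the advertised capability (the 36-bit width is only an example):

	static int example_hba_set_dma_mask(struct ufs_hba *hba)
	{
		/* hypothetical: the SoC interconnect only carries 36 address bits */
		return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(36));
	}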
10254 * hba->dev
10263 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
10276 struct ufs_hba *hba; in ufshcd_alloc_host() local
10301 hba = shost_priv(host); in ufshcd_alloc_host()
10302 hba->host = host; in ufshcd_alloc_host()
10303 hba->dev = dev; in ufshcd_alloc_host()
10304 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; in ufshcd_alloc_host()
10305 hba->nop_out_timeout = NOP_OUT_TIMEOUT; in ufshcd_alloc_host()
10306 ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry)); in ufshcd_alloc_host()
10307 INIT_LIST_HEAD(&hba->clk_list_head); in ufshcd_alloc_host()
10308 spin_lock_init(&hba->outstanding_lock); in ufshcd_alloc_host()
10310 *hba_handle = hba; in ufshcd_alloc_host()
10329 static int ufshcd_add_scsi_host(struct ufs_hba *hba) in ufshcd_add_scsi_host() argument
10333 if (is_mcq_supported(hba)) { in ufshcd_add_scsi_host()
10334 ufshcd_mcq_enable(hba); in ufshcd_add_scsi_host()
10335 err = ufshcd_alloc_mcq(hba); in ufshcd_add_scsi_host()
10337 ufshcd_config_mcq(hba); in ufshcd_add_scsi_host()
10340 ufshcd_mcq_disable(hba); in ufshcd_add_scsi_host()
10342 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", in ufshcd_add_scsi_host()
10346 if (!is_mcq_supported(hba) && !hba->lsdb_sup) { in ufshcd_add_scsi_host()
10347 dev_err(hba->dev, in ufshcd_add_scsi_host()
10353 err = scsi_add_host(hba->host, hba->dev); in ufshcd_add_scsi_host()
10355 dev_err(hba->dev, "scsi_add_host failed\n"); in ufshcd_add_scsi_host()
10358 hba->scsi_host_added = true; in ufshcd_add_scsi_host()
10360 hba->tmf_tag_set = (struct blk_mq_tag_set) { in ufshcd_add_scsi_host()
10362 .queue_depth = hba->nutmrs, in ufshcd_add_scsi_host()
10365 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); in ufshcd_add_scsi_host()
10368 hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); in ufshcd_add_scsi_host()
10369 if (IS_ERR(hba->tmf_queue)) { in ufshcd_add_scsi_host()
10370 err = PTR_ERR(hba->tmf_queue); in ufshcd_add_scsi_host()
10373 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, in ufshcd_add_scsi_host()
10374 sizeof(*hba->tmf_rqs), GFP_KERNEL); in ufshcd_add_scsi_host()
10375 if (!hba->tmf_rqs) { in ufshcd_add_scsi_host()
10383 blk_mq_destroy_queue(hba->tmf_queue); in ufshcd_add_scsi_host()
10384 blk_put_queue(hba->tmf_queue); in ufshcd_add_scsi_host()
10387 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_add_scsi_host()
10390 if (hba->scsi_host_added) in ufshcd_add_scsi_host()
10391 scsi_remove_host(hba->host); in ufshcd_add_scsi_host()
10398 * @hba: per-adapter instance
10404 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) in ufshcd_init() argument
10407 struct Scsi_Host *host = hba->host; in ufshcd_init()
10408 struct device *dev = hba->dev; in ufshcd_init()
10415 dev_set_drvdata(dev, hba); in ufshcd_init()
10418 dev_err(hba->dev, in ufshcd_init()
10424 hba->mmio_base = mmio_base; in ufshcd_init()
10425 hba->irq = irq; in ufshcd_init()
10426 hba->vps = &ufs_hba_vps; in ufshcd_init()
10432 spin_lock_init(&hba->clk_gating.lock); in ufshcd_init()
10442 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
10445 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
10449 err = ufshcd_hba_init(hba); in ufshcd_init()
10454 err = ufshcd_hba_capabilities(hba); in ufshcd_init()
10459 hba->ufs_version = ufshcd_get_ufs_version(hba); in ufshcd_init()
10462 hba->intr_mask = ufshcd_get_intr_mask(hba); in ufshcd_init()
10464 err = ufshcd_set_dma_mask(hba); in ufshcd_init()
10466 dev_err(hba->dev, "set dma mask failed\n"); in ufshcd_init()
10471 err = ufshcd_memory_alloc(hba); in ufshcd_init()
10473 dev_err(hba->dev, "Memory allocation failed\n"); in ufshcd_init()
10478 ufshcd_host_memory_configure(hba); in ufshcd_init()
10480 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
10481 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
10487 host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING); in ufshcd_init()
10493 hba->max_pwr_info.is_valid = false; in ufshcd_init()
10496 hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM, in ufshcd_init()
10497 hba->host->host_no); in ufshcd_init()
10498 if (!hba->eh_wq) { in ufshcd_init()
10499 dev_err(hba->dev, "%s: failed to create eh workqueue\n", in ufshcd_init()
10504 INIT_WORK(&hba->eh_work, ufshcd_err_handler); in ufshcd_init()
10505 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); in ufshcd_init()
10507 sema_init(&hba->host_sem, 1); in ufshcd_init()
10510 mutex_init(&hba->uic_cmd_mutex); in ufshcd_init()
10513 mutex_init(&hba->dev_cmd.lock); in ufshcd_init()
10516 mutex_init(&hba->ee_ctrl_mutex); in ufshcd_init()
10518 mutex_init(&hba->wb_mutex); in ufshcd_init()
10519 init_rwsem(&hba->clk_scaling_lock); in ufshcd_init()
10521 ufshcd_init_clk_gating(hba); in ufshcd_init()
10523 ufshcd_init_clk_scaling(hba); in ufshcd_init()
10530 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), in ufshcd_init()
10532 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); in ufshcd_init()
10537 ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_init()
10540 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); in ufshcd_init()
10542 dev_err(hba->dev, "request irq failed\n"); in ufshcd_init()
10545 hba->is_irq_enabled = true; in ufshcd_init()
10549 ufshcd_device_reset(hba); in ufshcd_init()
10551 ufshcd_init_crypto(hba); in ufshcd_init()
10554 err = ufshcd_hba_enable(hba); in ufshcd_init()
10556 dev_err(hba->dev, "Host controller enable failed\n"); in ufshcd_init()
10557 ufshcd_print_evt_hist(hba); in ufshcd_init()
10558 ufshcd_print_host_state(hba); in ufshcd_init()
10562 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work); in ufshcd_init()
10563 INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work); in ufshcd_init()
10566 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) { in ufshcd_init()
10567 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) | in ufshcd_init()
10580 ufshcd_set_ufs_dev_active(hba); in ufshcd_init()
10582 /* Initialize hba, detect and initialize UFS device */ in ufshcd_init()
10585 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_init()
10587 err = ufshcd_link_startup(hba); in ufshcd_init()
10591 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) in ufshcd_init()
10595 ufshcd_clear_dbg_ufs_stats(hba); in ufshcd_init()
10598 ufshcd_set_link_active(hba); in ufshcd_init()
10601 err = ufshcd_verify_dev_init(hba); in ufshcd_init()
10606 err = ufshcd_complete_dev_init(hba); in ufshcd_init()
10610 err = ufshcd_device_params_init(hba); in ufshcd_init()
10614 err = ufshcd_post_device_init(hba); in ufshcd_init()
10617 ufshcd_process_probe_result(hba, probe_start, err); in ufshcd_init()
10621 err = ufshcd_add_scsi_host(hba); in ufshcd_init()
10625 async_schedule(ufshcd_async_scan, hba); in ufshcd_init()
10626 ufs_sysfs_add_nodes(hba->dev); in ufshcd_init()
10629 ufshcd_pm_qos_init(hba); in ufshcd_init()
10633 hba->is_irq_enabled = false; in ufshcd_init()
10634 ufshcd_hba_exit(hba); in ufshcd_init()
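Editor's note: together with ufshcd_alloc_host(), ufshcd_init() is what a bus-specific glue driver calls from its probe routine: allocate the Scsi_Host plus ufs_hba first, then hand over the MMIO base and IRQ. A minimal sketch of that sequence for a platform device (error handling trimmed; names illustrative, and a real glue driver also sets up clocks, regulators and runtime PM around this):

	static int example_ufs_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		void __iomem *mmio_base;
		struct ufs_hba *hba;
		int irq, err;

		mmio_base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(mmio_base))
			return PTR_ERR(mmio_base);

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		err = ufshcd_alloc_host(dev, &hba);	/* allocates Scsi_Host + ufs_hba */
		if (err)
			return err;

		return ufshcd_init(hba, mmio_base, irq); /* enables the HBA, probes the device */
	}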
10642 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_resume_complete() local
10644 if (hba->complete_put) { in ufshcd_resume_complete()
10645 ufshcd_rpm_put(hba); in ufshcd_resume_complete()
10646 hba->complete_put = false; in ufshcd_resume_complete()
10651 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba) in ufshcd_rpm_ok_for_spm() argument
10653 struct device *dev = &hba->ufs_device_wlun->sdev_gendev; in ufshcd_rpm_ok_for_spm()
10660 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl); in ufshcd_rpm_ok_for_spm()
10661 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl); in ufshcd_rpm_ok_for_spm()
10663 hba->curr_dev_pwr_mode == dev_pwr_mode && in ufshcd_rpm_ok_for_spm()
10664 hba->uic_link_state == link_state && in ufshcd_rpm_ok_for_spm()
10665 !hba->dev_info.b_rpm_dev_flush_capable; in ufshcd_rpm_ok_for_spm()
10673 struct ufs_hba *hba = dev_get_drvdata(dev); in __ufshcd_suspend_prepare() local
10682 if (hba->ufs_device_wlun) { in __ufshcd_suspend_prepare()
10684 ufshcd_rpm_get_noresume(hba); in __ufshcd_suspend_prepare()
10689 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) { in __ufshcd_suspend_prepare()
10691 ret = ufshcd_rpm_resume(hba); in __ufshcd_suspend_prepare()
10693 ufshcd_rpm_put(hba); in __ufshcd_suspend_prepare()
10697 hba->complete_put = true; in __ufshcd_suspend_prepare()
10713 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_wl_poweroff() local
10715 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); in ufshcd_wl_poweroff()