1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Universal Flash Storage Host controller driver Core 4 * Copyright (C) 2011-2013 Samsung India Software Operations 5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 6 * 7 * Authors: 8 * Santosh Yaraganavi <santosh.sy@samsung.com> 9 * Vinayak Holikatti <h.vinayak@samsung.com> 10 */ 11 12 #include <linux/async.h> 13 #include <linux/devfreq.h> 14 #include <linux/nls.h> 15 #include <linux/of.h> 16 #include <linux/bitfield.h> 17 #include <linux/blk-pm.h> 18 #include <linux/blkdev.h> 19 #include <linux/clk.h> 20 #include <linux/delay.h> 21 #include <linux/interrupt.h> 22 #include <linux/module.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/sched/clock.h> 25 #include <scsi/scsi_cmnd.h> 26 #include <scsi/scsi_dbg.h> 27 #include <scsi/scsi_driver.h> 28 #include <scsi/scsi_eh.h> 29 #include "ufshcd-priv.h" 30 #include <ufs/ufs_quirks.h> 31 #include <ufs/unipro.h> 32 #include "ufs-sysfs.h" 33 #include "ufs-debugfs.h" 34 #include "ufs-fault-injection.h" 35 #include "ufs_bsg.h" 36 #include "ufshcd-crypto.h" 37 #include <asm/unaligned.h> 38 39 #define CREATE_TRACE_POINTS 40 #include <trace/events/ufs.h> 41 42 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ 43 UTP_TASK_REQ_COMPL |\ 44 UFSHCD_ERROR_MASK) 45 46 #define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\ 47 UFSHCD_ERROR_MASK |\ 48 MCQ_CQ_EVENT_STATUS) 49 50 51 /* UIC command timeout, unit: ms */ 52 #define UIC_CMD_TIMEOUT 500 53 54 /* NOP OUT retries waiting for NOP IN response */ 55 #define NOP_OUT_RETRIES 10 56 /* Timeout after 50 msecs if NOP OUT hangs without response */ 57 #define NOP_OUT_TIMEOUT 50 /* msecs */ 58 59 /* Query request retries */ 60 #define QUERY_REQ_RETRIES 3 61 /* Query request timeout */ 62 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ 63 64 /* Advanced RPMB request timeout */ 65 #define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */ 66 67 /* Task management command timeout */ 68 #define TM_CMD_TIMEOUT 100 /* msecs */ 69 70 /* maximum number of retries for a general UIC command */ 71 #define UFS_UIC_COMMAND_RETRIES 3 72 73 /* maximum number of link-startup retries */ 74 #define DME_LINKSTARTUP_RETRIES 3 75 76 /* maximum number of reset retries before giving up */ 77 #define MAX_HOST_RESET_RETRIES 5 78 79 /* Maximum number of error handler retries before giving up */ 80 #define MAX_ERR_HANDLER_RETRIES 5 81 82 /* Expose the flag value from utp_upiu_query.value */ 83 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF 84 85 /* Interrupt aggregation default timeout, unit: 40us */ 86 #define INT_AGGR_DEF_TO 0x02 87 88 /* default delay of autosuspend: 2000 ms */ 89 #define RPM_AUTOSUSPEND_DELAY_MS 2000 90 91 /* Default delay of RPM device flush delayed work */ 92 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000 93 94 /* Default value of wait time before gating device ref clock */ 95 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */ 96 97 /* Polling time to wait for fDeviceInit */ 98 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */ 99 100 /* UFSHC 4.0 compliant HC support this mode. */ 101 static bool use_mcq_mode = true; 102 103 static bool is_mcq_supported(struct ufs_hba *hba) 104 { 105 return hba->mcq_sup && use_mcq_mode; 106 } 107 108 module_param(use_mcq_mode, bool, 0644); 109 MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. 
MCQ is enabled by default"); 110 111 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ 112 ({ \ 113 int _ret; \ 114 if (_on) \ 115 _ret = ufshcd_enable_vreg(_dev, _vreg); \ 116 else \ 117 _ret = ufshcd_disable_vreg(_dev, _vreg); \ 118 _ret; \ 119 }) 120 121 #define ufshcd_hex_dump(prefix_str, buf, len) do { \ 122 size_t __len = (len); \ 123 print_hex_dump(KERN_ERR, prefix_str, \ 124 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\ 125 16, 4, buf, __len, false); \ 126 } while (0) 127 128 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, 129 const char *prefix) 130 { 131 u32 *regs; 132 size_t pos; 133 134 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ 135 return -EINVAL; 136 137 regs = kzalloc(len, GFP_ATOMIC); 138 if (!regs) 139 return -ENOMEM; 140 141 for (pos = 0; pos < len; pos += 4) { 142 if (offset == 0 && 143 pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER && 144 pos <= REG_UIC_ERROR_CODE_DME) 145 continue; 146 regs[pos / 4] = ufshcd_readl(hba, offset + pos); 147 } 148 149 ufshcd_hex_dump(prefix, regs, len); 150 kfree(regs); 151 152 return 0; 153 } 154 EXPORT_SYMBOL_GPL(ufshcd_dump_regs); 155 156 enum { 157 UFSHCD_MAX_CHANNEL = 0, 158 UFSHCD_MAX_ID = 1, 159 UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED, 160 UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED, 161 }; 162 163 static const char *const ufshcd_state_name[] = { 164 [UFSHCD_STATE_RESET] = "reset", 165 [UFSHCD_STATE_OPERATIONAL] = "operational", 166 [UFSHCD_STATE_ERROR] = "error", 167 [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal", 168 [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal", 169 }; 170 171 /* UFSHCD error handling flags */ 172 enum { 173 UFSHCD_EH_IN_PROGRESS = (1 << 0), 174 }; 175 176 /* UFSHCD UIC layer error flags */ 177 enum { 178 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ 179 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */ 180 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */ 181 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */ 182 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */ 183 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */ 184 UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */ 185 }; 186 187 #define ufshcd_set_eh_in_progress(h) \ 188 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS) 189 #define ufshcd_eh_in_progress(h) \ 190 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS) 191 #define ufshcd_clear_eh_in_progress(h) \ 192 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) 193 194 const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = { 195 [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE}, 196 [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE}, 197 [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE}, 198 [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE}, 199 [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, 200 [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE}, 201 /* 202 * For DeepSleep, the link is first put in hibern8 and then off. 203 * Leaving the link in hibern8 is not supported. 
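	 * Hence UFS_PM_LVL_6 below pairs UFS_DEEPSLEEP_PWR_MODE with
	 * UIC_LINK_OFF_STATE rather than UIC_LINK_HIBERN8_STATE.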
204 */ 205 [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE}, 206 }; 207 208 static inline enum ufs_dev_pwr_mode 209 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl) 210 { 211 return ufs_pm_lvl_states[lvl].dev_state; 212 } 213 214 static inline enum uic_link_state 215 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) 216 { 217 return ufs_pm_lvl_states[lvl].link_state; 218 } 219 220 static inline enum ufs_pm_level 221 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, 222 enum uic_link_state link_state) 223 { 224 enum ufs_pm_level lvl; 225 226 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) { 227 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) && 228 (ufs_pm_lvl_states[lvl].link_state == link_state)) 229 return lvl; 230 } 231 232 /* if no match found, return the level 0 */ 233 return UFS_PM_LVL_0; 234 } 235 236 static const struct ufs_dev_quirk ufs_fixups[] = { 237 /* UFS cards deviations table */ 238 { .wmanufacturerid = UFS_VENDOR_MICRON, 239 .model = UFS_ANY_MODEL, 240 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, 241 { .wmanufacturerid = UFS_VENDOR_SAMSUNG, 242 .model = UFS_ANY_MODEL, 243 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | 244 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE | 245 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS }, 246 { .wmanufacturerid = UFS_VENDOR_SKHYNIX, 247 .model = UFS_ANY_MODEL, 248 .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME }, 249 { .wmanufacturerid = UFS_VENDOR_SKHYNIX, 250 .model = "hB8aL1" /*H28U62301AMR*/, 251 .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME }, 252 { .wmanufacturerid = UFS_VENDOR_TOSHIBA, 253 .model = UFS_ANY_MODEL, 254 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, 255 { .wmanufacturerid = UFS_VENDOR_TOSHIBA, 256 .model = "THGLF2G9C8KBADG", 257 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, 258 { .wmanufacturerid = UFS_VENDOR_TOSHIBA, 259 .model = "THGLF2G9D8KBADG", 260 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, 261 {} 262 }; 263 264 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba); 265 static void ufshcd_async_scan(void *data, async_cookie_t cookie); 266 static int ufshcd_reset_and_restore(struct ufs_hba *hba); 267 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); 268 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); 269 static void ufshcd_hba_exit(struct ufs_hba *hba); 270 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params); 271 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); 272 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); 273 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); 274 static void ufshcd_resume_clkscaling(struct ufs_hba *hba); 275 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); 276 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba); 277 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); 278 static irqreturn_t ufshcd_intr(int irq, void *__hba); 279 static int ufshcd_change_power_mode(struct ufs_hba *hba, 280 struct ufs_pa_layer_attr *pwr_mode); 281 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); 282 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); 283 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 284 struct ufs_vreg *vreg); 285 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba, 286 bool enable); 287 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba); 288 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba); 289 290 static inline void ufshcd_enable_irq(struct 
ufs_hba *hba) 291 { 292 if (!hba->is_irq_enabled) { 293 enable_irq(hba->irq); 294 hba->is_irq_enabled = true; 295 } 296 } 297 298 static inline void ufshcd_disable_irq(struct ufs_hba *hba) 299 { 300 if (hba->is_irq_enabled) { 301 disable_irq(hba->irq); 302 hba->is_irq_enabled = false; 303 } 304 } 305 306 static void ufshcd_configure_wb(struct ufs_hba *hba) 307 { 308 if (!ufshcd_is_wb_allowed(hba)) 309 return; 310 311 ufshcd_wb_toggle(hba, true); 312 313 ufshcd_wb_toggle_buf_flush_during_h8(hba, true); 314 315 if (ufshcd_is_wb_buf_flush_allowed(hba)) 316 ufshcd_wb_toggle_buf_flush(hba, true); 317 } 318 319 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) 320 { 321 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) 322 scsi_unblock_requests(hba->host); 323 } 324 325 static void ufshcd_scsi_block_requests(struct ufs_hba *hba) 326 { 327 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) 328 scsi_block_requests(hba->host); 329 } 330 331 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, 332 enum ufs_trace_str_t str_t) 333 { 334 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; 335 struct utp_upiu_header *header; 336 337 if (!trace_ufshcd_upiu_enabled()) 338 return; 339 340 if (str_t == UFS_CMD_SEND) 341 header = &rq->header; 342 else 343 header = &hba->lrb[tag].ucd_rsp_ptr->header; 344 345 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb, 346 UFS_TSF_CDB); 347 } 348 349 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, 350 enum ufs_trace_str_t str_t, 351 struct utp_upiu_req *rq_rsp) 352 { 353 if (!trace_ufshcd_upiu_enabled()) 354 return; 355 356 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header, 357 &rq_rsp->qr, UFS_TSF_OSF); 358 } 359 360 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, 361 enum ufs_trace_str_t str_t) 362 { 363 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; 364 365 if (!trace_ufshcd_upiu_enabled()) 366 return; 367 368 if (str_t == UFS_TM_SEND) 369 trace_ufshcd_upiu(dev_name(hba->dev), str_t, 370 &descp->upiu_req.req_header, 371 &descp->upiu_req.input_param1, 372 UFS_TSF_TM_INPUT); 373 else 374 trace_ufshcd_upiu(dev_name(hba->dev), str_t, 375 &descp->upiu_rsp.rsp_header, 376 &descp->upiu_rsp.output_param1, 377 UFS_TSF_TM_OUTPUT); 378 } 379 380 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, 381 const struct uic_command *ucmd, 382 enum ufs_trace_str_t str_t) 383 { 384 u32 cmd; 385 386 if (!trace_ufshcd_uic_command_enabled()) 387 return; 388 389 if (str_t == UFS_CMD_SEND) 390 cmd = ucmd->command; 391 else 392 cmd = ufshcd_readl(hba, REG_UIC_COMMAND); 393 394 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd, 395 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), 396 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), 397 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); 398 } 399 400 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, 401 enum ufs_trace_str_t str_t) 402 { 403 u64 lba = 0; 404 u8 opcode = 0, group_id = 0; 405 u32 doorbell = 0; 406 u32 intr; 407 int hwq_id = -1; 408 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; 409 struct scsi_cmnd *cmd = lrbp->cmd; 410 struct request *rq = scsi_cmd_to_rq(cmd); 411 int transfer_len = -1; 412 413 if (!cmd) 414 return; 415 416 /* trace UPIU also */ 417 ufshcd_add_cmd_upiu_trace(hba, tag, str_t); 418 if (!trace_ufshcd_command_enabled()) 419 return; 420 421 opcode = cmd->cmnd[0]; 422 423 if (opcode == READ_10 || opcode == WRITE_10) { 424 /* 425 * Currently we only fully trace read(10) and 
write(10) commands 426 */ 427 transfer_len = 428 be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len); 429 lba = scsi_get_lba(cmd); 430 if (opcode == WRITE_10) 431 group_id = lrbp->cmd->cmnd[6]; 432 } else if (opcode == UNMAP) { 433 /* 434 * The number of Bytes to be unmapped beginning with the lba. 435 */ 436 transfer_len = blk_rq_bytes(rq); 437 lba = scsi_get_lba(cmd); 438 } 439 440 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 441 442 if (is_mcq_enabled(hba)) { 443 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq); 444 445 hwq_id = hwq->id; 446 } else { 447 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 448 } 449 trace_ufshcd_command(dev_name(hba->dev), str_t, tag, 450 doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id); 451 } 452 453 static void ufshcd_print_clk_freqs(struct ufs_hba *hba) 454 { 455 struct ufs_clk_info *clki; 456 struct list_head *head = &hba->clk_list_head; 457 458 if (list_empty(head)) 459 return; 460 461 list_for_each_entry(clki, head, list) { 462 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq && 463 clki->max_freq) 464 dev_err(hba->dev, "clk: %s, rate: %u\n", 465 clki->name, clki->curr_freq); 466 } 467 } 468 469 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id, 470 const char *err_name) 471 { 472 int i; 473 bool found = false; 474 const struct ufs_event_hist *e; 475 476 if (id >= UFS_EVT_CNT) 477 return; 478 479 e = &hba->ufs_stats.event[id]; 480 481 for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) { 482 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH; 483 484 if (e->tstamp[p] == 0) 485 continue; 486 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, 487 e->val[p], div_u64(e->tstamp[p], 1000)); 488 found = true; 489 } 490 491 if (!found) 492 dev_err(hba->dev, "No record of %s\n", err_name); 493 else 494 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt); 495 } 496 497 static void ufshcd_print_evt_hist(struct ufs_hba *hba) 498 { 499 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); 500 501 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err"); 502 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err"); 503 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err"); 504 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err"); 505 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err"); 506 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR, 507 "auto_hibern8_err"); 508 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err"); 509 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL, 510 "link_startup_fail"); 511 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail"); 512 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR, 513 "suspend_fail"); 514 ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail"); 515 ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR, 516 "wlun suspend_fail"); 517 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset"); 518 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset"); 519 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort"); 520 521 ufshcd_vops_dbg_register_dump(hba); 522 } 523 524 static 525 void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt) 526 { 527 const struct ufshcd_lrb *lrbp; 528 int prdt_length; 529 530 lrbp = &hba->lrb[tag]; 531 532 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", 533 tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000)); 534 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", 535 tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000)); 536 dev_err(hba->dev, 537 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n", 538 tag, (u64)lrbp->utrd_dma_addr); 539 540 
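	/* Hex-dump the transfer request descriptor, then the request and response UPIUs. */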
ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr, 541 sizeof(struct utp_transfer_req_desc)); 542 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, 543 (u64)lrbp->ucd_req_dma_addr); 544 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr, 545 sizeof(struct utp_upiu_req)); 546 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, 547 (u64)lrbp->ucd_rsp_dma_addr); 548 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr, 549 sizeof(struct utp_upiu_rsp)); 550 551 prdt_length = le16_to_cpu( 552 lrbp->utr_descriptor_ptr->prd_table_length); 553 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) 554 prdt_length /= ufshcd_sg_entry_size(hba); 555 556 dev_err(hba->dev, 557 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", 558 tag, prdt_length, 559 (u64)lrbp->ucd_prdt_dma_addr); 560 561 if (pr_prdt) 562 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, 563 ufshcd_sg_entry_size(hba) * prdt_length); 564 } 565 566 static bool ufshcd_print_tr_iter(struct request *req, void *priv) 567 { 568 struct scsi_device *sdev = req->q->queuedata; 569 struct Scsi_Host *shost = sdev->host; 570 struct ufs_hba *hba = shost_priv(shost); 571 572 ufshcd_print_tr(hba, req->tag, *(bool *)priv); 573 574 return true; 575 } 576 577 /** 578 * ufshcd_print_trs_all - print trs for all started requests. 579 * @hba: per-adapter instance. 580 * @pr_prdt: need to print prdt or not. 581 */ 582 static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt) 583 { 584 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt); 585 } 586 587 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) 588 { 589 int tag; 590 591 for_each_set_bit(tag, &bitmap, hba->nutmrs) { 592 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; 593 594 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); 595 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp)); 596 } 597 } 598 599 static void ufshcd_print_host_state(struct ufs_hba *hba) 600 { 601 const struct scsi_device *sdev_ufs = hba->ufs_device_wlun; 602 603 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); 604 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", 605 hba->outstanding_reqs, hba->outstanding_tasks); 606 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", 607 hba->saved_err, hba->saved_uic_err); 608 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", 609 hba->curr_dev_pwr_mode, hba->uic_link_state); 610 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", 611 hba->pm_op_in_progress, hba->is_sys_suspended); 612 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", 613 hba->auto_bkops_enabled, hba->host->host_self_blocked); 614 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); 615 dev_err(hba->dev, 616 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n", 617 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000), 618 hba->ufs_stats.hibern8_exit_cnt); 619 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", 620 div_u64(hba->ufs_stats.last_intr_ts, 1000), 621 hba->ufs_stats.last_intr_status); 622 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", 623 hba->eh_flags, hba->req_abort_count); 624 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", 625 hba->ufs_version, hba->capabilities, hba->caps); 626 dev_err(hba->dev, "quirks=0x%x, dev. 
quirks=0x%x\n", hba->quirks, 627 hba->dev_quirks); 628 if (sdev_ufs) 629 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n", 630 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev); 631 632 ufshcd_print_clk_freqs(hba); 633 } 634 635 /** 636 * ufshcd_print_pwr_info - print power params as saved in hba 637 * power info 638 * @hba: per-adapter instance 639 */ 640 static void ufshcd_print_pwr_info(struct ufs_hba *hba) 641 { 642 static const char * const names[] = { 643 "INVALID MODE", 644 "FAST MODE", 645 "SLOW_MODE", 646 "INVALID MODE", 647 "FASTAUTO_MODE", 648 "SLOWAUTO_MODE", 649 "INVALID MODE", 650 }; 651 652 /* 653 * Using dev_dbg to avoid messages during runtime PM to avoid 654 * never-ending cycles of messages written back to storage by user space 655 * causing runtime resume, causing more messages and so on. 656 */ 657 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", 658 __func__, 659 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, 660 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, 661 names[hba->pwr_info.pwr_rx], 662 names[hba->pwr_info.pwr_tx], 663 hba->pwr_info.hs_rate); 664 } 665 666 static void ufshcd_device_reset(struct ufs_hba *hba) 667 { 668 int err; 669 670 err = ufshcd_vops_device_reset(hba); 671 672 if (!err) { 673 ufshcd_set_ufs_dev_active(hba); 674 if (ufshcd_is_wb_allowed(hba)) { 675 hba->dev_info.wb_enabled = false; 676 hba->dev_info.wb_buf_flush_enabled = false; 677 } 678 } 679 if (err != -EOPNOTSUPP) 680 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); 681 } 682 683 void ufshcd_delay_us(unsigned long us, unsigned long tolerance) 684 { 685 if (!us) 686 return; 687 688 if (us < 10) 689 udelay(us); 690 else 691 usleep_range(us, us + tolerance); 692 } 693 EXPORT_SYMBOL_GPL(ufshcd_delay_us); 694 695 /** 696 * ufshcd_wait_for_register - wait for register value to change 697 * @hba: per-adapter interface 698 * @reg: mmio register offset 699 * @mask: mask to apply to the read register value 700 * @val: value to wait for 701 * @interval_us: polling interval in microseconds 702 * @timeout_ms: timeout in milliseconds 703 * 704 * Return: -ETIMEDOUT on error, zero on success. 
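 *
 * Illustrative (hypothetical) usage, for example waiting for the controller
 * enable bit to read back as set:
 *
 *   ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE,
 *                            CONTROLLER_ENABLE, 100, 10);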
705 */ 706 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, 707 u32 val, unsigned long interval_us, 708 unsigned long timeout_ms) 709 { 710 int err = 0; 711 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); 712 713 /* ignore bits that we don't intend to wait on */ 714 val = val & mask; 715 716 while ((ufshcd_readl(hba, reg) & mask) != val) { 717 usleep_range(interval_us, interval_us + 50); 718 if (time_after(jiffies, timeout)) { 719 if ((ufshcd_readl(hba, reg) & mask) != val) 720 err = -ETIMEDOUT; 721 break; 722 } 723 } 724 725 return err; 726 } 727 728 /** 729 * ufshcd_get_intr_mask - Get the interrupt bit mask 730 * @hba: Pointer to adapter instance 731 * 732 * Return: interrupt bit mask per version 733 */ 734 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) 735 { 736 if (hba->ufs_version == ufshci_version(1, 0)) 737 return INTERRUPT_MASK_ALL_VER_10; 738 if (hba->ufs_version <= ufshci_version(2, 0)) 739 return INTERRUPT_MASK_ALL_VER_11; 740 741 return INTERRUPT_MASK_ALL_VER_21; 742 } 743 744 /** 745 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA 746 * @hba: Pointer to adapter instance 747 * 748 * Return: UFSHCI version supported by the controller 749 */ 750 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) 751 { 752 u32 ufshci_ver; 753 754 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) 755 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba); 756 else 757 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION); 758 759 /* 760 * UFSHCI v1.x uses a different version scheme, in order 761 * to allow the use of comparisons with the ufshci_version 762 * function, we convert it to the same scheme as ufs 2.0+. 763 */ 764 if (ufshci_ver & 0x00010000) 765 return ufshci_version(1, ufshci_ver & 0x00000100); 766 767 return ufshci_ver; 768 } 769 770 /** 771 * ufshcd_is_device_present - Check if any device connected to 772 * the host controller 773 * @hba: pointer to adapter instance 774 * 775 * Return: true if device present, false if no device detected 776 */ 777 static inline bool ufshcd_is_device_present(struct ufs_hba *hba) 778 { 779 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT; 780 } 781 782 /** 783 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status 784 * @lrbp: pointer to local command reference block 785 * @cqe: pointer to the completion queue entry 786 * 787 * This function is used to get the OCS field from UTRD 788 * 789 * Return: the OCS field in the UTRD. 790 */ 791 static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp, 792 struct cq_entry *cqe) 793 { 794 if (cqe) 795 return le32_to_cpu(cqe->status) & MASK_OCS; 796 797 return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS; 798 } 799 800 /** 801 * ufshcd_utrl_clear() - Clear requests from the controller request list. 802 * @hba: per adapter instance 803 * @mask: mask with one bit set for each request to be cleared 804 */ 805 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask) 806 { 807 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) 808 mask = ~mask; 809 /* 810 * From the UFSHCI specification: "UTP Transfer Request List CLear 811 * Register (UTRLCLR): This field is bit significant. Each bit 812 * corresponds to a slot in the UTP Transfer Request List, where bit 0 813 * corresponds to request slot 0. A bit in this field is set to ‘0’ 814 * by host software to indicate to the host controller that a transfer 815 * request slot is cleared. 
The host controller 816 * shall free up any resources associated to the request slot 817 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The 818 * host software indicates no change to request slots by setting the 819 * associated bits in this field to ‘1’. Bits in this field shall only 820 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’." 821 */ 822 ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR); 823 } 824 825 /** 826 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register 827 * @hba: per adapter instance 828 * @pos: position of the bit to be cleared 829 */ 830 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) 831 { 832 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) 833 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); 834 else 835 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); 836 } 837 838 /** 839 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY 840 * @reg: Register value of host controller status 841 * 842 * Return: 0 on success; a positive value if failed. 843 */ 844 static inline int ufshcd_get_lists_status(u32 reg) 845 { 846 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY); 847 } 848 849 /** 850 * ufshcd_get_uic_cmd_result - Get the UIC command result 851 * @hba: Pointer to adapter instance 852 * 853 * This function gets the result of UIC command completion 854 * 855 * Return: 0 on success; non-zero value on error. 856 */ 857 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) 858 { 859 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & 860 MASK_UIC_COMMAND_RESULT; 861 } 862 863 /** 864 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command 865 * @hba: Pointer to adapter instance 866 * 867 * This function gets UIC command argument3 868 * 869 * Return: 0 on success; non-zero value on error. 870 */ 871 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) 872 { 873 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); 874 } 875 876 /** 877 * ufshcd_get_req_rsp - returns the TR response transaction type 878 * @ucd_rsp_ptr: pointer to response UPIU 879 * 880 * Return: UPIU type. 881 */ 882 static inline enum upiu_response_transaction 883 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) 884 { 885 return ucd_rsp_ptr->header.transaction_code; 886 } 887 888 /** 889 * ufshcd_is_exception_event - Check if the device raised an exception event 890 * @ucd_rsp_ptr: pointer to response UPIU 891 * 892 * The function checks if the device raised an exception event indicated in 893 * the Device Information field of response UPIU. 894 * 895 * Return: true if exception is raised, false otherwise. 896 */ 897 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) 898 { 899 return ucd_rsp_ptr->header.device_information & 1; 900 } 901 902 /** 903 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. 904 * @hba: per adapter instance 905 */ 906 static inline void 907 ufshcd_reset_intr_aggr(struct ufs_hba *hba) 908 { 909 ufshcd_writel(hba, INT_AGGR_ENABLE | 910 INT_AGGR_COUNTER_AND_TIMER_RESET, 911 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 912 } 913 914 /** 915 * ufshcd_config_intr_aggr - Configure interrupt aggregation values. 
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * Setting the run-stop registers to 1 indicates to the host controller
 * that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_crypto_enable(hba))
		val |= CRYPTO_GENERAL_ENABLE;

	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Return: true if and only if the controller is active.
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
	if (hba->ufs_version <= ufshci_version(1, 1))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameter tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either the host or the
	 * device doesn't support UniPro ver 1.6 or later. But to keep the
	 * manual tuning logic simple, we only do manual tuning if the local
	 * UniPro version doesn't support ver1.6 or later.
	 */
	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
}

/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Return: 0 if successful; < 0 upon failure.
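 *
 * (Each clock in hba->clk_list_head is reprogrammed via clk_set_rate() and
 * clki->curr_freq is updated on success; see the body below.)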
1007 */ 1008 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up) 1009 { 1010 int ret = 0; 1011 struct ufs_clk_info *clki; 1012 struct list_head *head = &hba->clk_list_head; 1013 1014 if (list_empty(head)) 1015 goto out; 1016 1017 list_for_each_entry(clki, head, list) { 1018 if (!IS_ERR_OR_NULL(clki->clk)) { 1019 if (scale_up && clki->max_freq) { 1020 if (clki->curr_freq == clki->max_freq) 1021 continue; 1022 1023 ret = clk_set_rate(clki->clk, clki->max_freq); 1024 if (ret) { 1025 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", 1026 __func__, clki->name, 1027 clki->max_freq, ret); 1028 break; 1029 } 1030 trace_ufshcd_clk_scaling(dev_name(hba->dev), 1031 "scaled up", clki->name, 1032 clki->curr_freq, 1033 clki->max_freq); 1034 1035 clki->curr_freq = clki->max_freq; 1036 1037 } else if (!scale_up && clki->min_freq) { 1038 if (clki->curr_freq == clki->min_freq) 1039 continue; 1040 1041 ret = clk_set_rate(clki->clk, clki->min_freq); 1042 if (ret) { 1043 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", 1044 __func__, clki->name, 1045 clki->min_freq, ret); 1046 break; 1047 } 1048 trace_ufshcd_clk_scaling(dev_name(hba->dev), 1049 "scaled down", clki->name, 1050 clki->curr_freq, 1051 clki->min_freq); 1052 clki->curr_freq = clki->min_freq; 1053 } 1054 } 1055 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, 1056 clki->name, clk_get_rate(clki->clk)); 1057 } 1058 1059 out: 1060 return ret; 1061 } 1062 1063 /** 1064 * ufshcd_scale_clks - scale up or scale down UFS controller clocks 1065 * @hba: per adapter instance 1066 * @scale_up: True if scaling up and false if scaling down 1067 * 1068 * Return: 0 if successful; < 0 upon failure. 1069 */ 1070 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) 1071 { 1072 int ret = 0; 1073 ktime_t start = ktime_get(); 1074 1075 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); 1076 if (ret) 1077 goto out; 1078 1079 ret = ufshcd_set_clk_freq(hba, scale_up); 1080 if (ret) 1081 goto out; 1082 1083 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); 1084 if (ret) 1085 ufshcd_set_clk_freq(hba, !scale_up); 1086 1087 out: 1088 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), 1089 (scale_up ? "up" : "down"), 1090 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 1091 return ret; 1092 } 1093 1094 /** 1095 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not 1096 * @hba: per adapter instance 1097 * @scale_up: True if scaling up and false if scaling down 1098 * 1099 * Return: true if scaling is required, false otherwise. 1100 */ 1101 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, 1102 bool scale_up) 1103 { 1104 struct ufs_clk_info *clki; 1105 struct list_head *head = &hba->clk_list_head; 1106 1107 if (list_empty(head)) 1108 return false; 1109 1110 list_for_each_entry(clki, head, list) { 1111 if (!IS_ERR_OR_NULL(clki->clk)) { 1112 if (scale_up && clki->max_freq) { 1113 if (clki->curr_freq == clki->max_freq) 1114 continue; 1115 return true; 1116 } else if (!scale_up && clki->min_freq) { 1117 if (clki->curr_freq == clki->min_freq) 1118 continue; 1119 return true; 1120 } 1121 } 1122 } 1123 1124 return false; 1125 } 1126 1127 /* 1128 * Determine the number of pending commands by counting the bits in the SCSI 1129 * device budget maps. This approach has been selected because a bit is set in 1130 * the budget map before scsi_host_queue_ready() checks the host_self_blocked 1131 * flag. 
The host_self_blocked flag can be modified by calling 1132 * scsi_block_requests() or scsi_unblock_requests(). 1133 */ 1134 static u32 ufshcd_pending_cmds(struct ufs_hba *hba) 1135 { 1136 const struct scsi_device *sdev; 1137 u32 pending = 0; 1138 1139 lockdep_assert_held(hba->host->host_lock); 1140 __shost_for_each_device(sdev, hba->host) 1141 pending += sbitmap_weight(&sdev->budget_map); 1142 1143 return pending; 1144 } 1145 1146 /* 1147 * Wait until all pending SCSI commands and TMFs have finished or the timeout 1148 * has expired. 1149 * 1150 * Return: 0 upon success; -EBUSY upon timeout. 1151 */ 1152 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, 1153 u64 wait_timeout_us) 1154 { 1155 unsigned long flags; 1156 int ret = 0; 1157 u32 tm_doorbell; 1158 u32 tr_pending; 1159 bool timeout = false, do_last_check = false; 1160 ktime_t start; 1161 1162 ufshcd_hold(hba); 1163 spin_lock_irqsave(hba->host->host_lock, flags); 1164 /* 1165 * Wait for all the outstanding tasks/transfer requests. 1166 * Verify by checking the doorbell registers are clear. 1167 */ 1168 start = ktime_get(); 1169 do { 1170 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { 1171 ret = -EBUSY; 1172 goto out; 1173 } 1174 1175 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); 1176 tr_pending = ufshcd_pending_cmds(hba); 1177 if (!tm_doorbell && !tr_pending) { 1178 timeout = false; 1179 break; 1180 } else if (do_last_check) { 1181 break; 1182 } 1183 1184 spin_unlock_irqrestore(hba->host->host_lock, flags); 1185 io_schedule_timeout(msecs_to_jiffies(20)); 1186 if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1187 wait_timeout_us) { 1188 timeout = true; 1189 /* 1190 * We might have scheduled out for long time so make 1191 * sure to check if doorbells are cleared by this time 1192 * or not. 1193 */ 1194 do_last_check = true; 1195 } 1196 spin_lock_irqsave(hba->host->host_lock, flags); 1197 } while (tm_doorbell || tr_pending); 1198 1199 if (timeout) { 1200 dev_err(hba->dev, 1201 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n", 1202 __func__, tm_doorbell, tr_pending); 1203 ret = -EBUSY; 1204 } 1205 out: 1206 spin_unlock_irqrestore(hba->host->host_lock, flags); 1207 ufshcd_release(hba); 1208 return ret; 1209 } 1210 1211 /** 1212 * ufshcd_scale_gear - scale up/down UFS gear 1213 * @hba: per adapter instance 1214 * @scale_up: True for scaling up gear and false for scaling down 1215 * 1216 * Return: 0 for success; -EBUSY if scaling can't happen at this time; 1217 * non-zero for any other errors. 1218 */ 1219 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) 1220 { 1221 int ret = 0; 1222 struct ufs_pa_layer_attr new_pwr_info; 1223 1224 if (scale_up) { 1225 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info, 1226 sizeof(struct ufs_pa_layer_attr)); 1227 } else { 1228 memcpy(&new_pwr_info, &hba->pwr_info, 1229 sizeof(struct ufs_pa_layer_attr)); 1230 1231 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear || 1232 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) { 1233 /* save the current power mode */ 1234 memcpy(&hba->clk_scaling.saved_pwr_info, 1235 &hba->pwr_info, 1236 sizeof(struct ufs_pa_layer_attr)); 1237 1238 /* scale down gear */ 1239 new_pwr_info.gear_tx = hba->clk_scaling.min_gear; 1240 new_pwr_info.gear_rx = hba->clk_scaling.min_gear; 1241 } 1242 } 1243 1244 /* check if the power mode needs to be changed or not? 
*/
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
	int ret = 0;
	/*
	 * Make sure that there are no outstanding requests while
	 * clock scaling is in progress.
	 */
	ufshcd_scsi_block_requests(hba);
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);

	if (!hba->clk_scaling.is_allowed ||
	    ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		mutex_unlock(&hba->wb_mutex);
		ufshcd_scsi_unblock_requests(hba);
		goto out;
	}

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba);

out:
	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
	up_write(&hba->clk_scaling_lock);

	/* Enable Write Booster if we have scaled up else disable it */
	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
		ufshcd_wb_toggle(hba, scale_up);

	mutex_unlock(&hba->wb_mutex);

	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
}

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
 * for any other errors.
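 *
 * When scaling down, the gear is dropped before the clocks; when scaling up,
 * the clocks are raised before the gear (see the body below).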
1309 */ 1310 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) 1311 { 1312 int ret = 0; 1313 1314 ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC); 1315 if (ret) 1316 return ret; 1317 1318 /* scale down the gear before scaling down clocks */ 1319 if (!scale_up) { 1320 ret = ufshcd_scale_gear(hba, false); 1321 if (ret) 1322 goto out_unprepare; 1323 } 1324 1325 ret = ufshcd_scale_clks(hba, scale_up); 1326 if (ret) { 1327 if (!scale_up) 1328 ufshcd_scale_gear(hba, true); 1329 goto out_unprepare; 1330 } 1331 1332 /* scale up the gear after scaling up clocks */ 1333 if (scale_up) { 1334 ret = ufshcd_scale_gear(hba, true); 1335 if (ret) { 1336 ufshcd_scale_clks(hba, false); 1337 goto out_unprepare; 1338 } 1339 } 1340 1341 out_unprepare: 1342 ufshcd_clock_scaling_unprepare(hba, ret, scale_up); 1343 return ret; 1344 } 1345 1346 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work) 1347 { 1348 struct ufs_hba *hba = container_of(work, struct ufs_hba, 1349 clk_scaling.suspend_work); 1350 unsigned long irq_flags; 1351 1352 spin_lock_irqsave(hba->host->host_lock, irq_flags); 1353 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { 1354 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1355 return; 1356 } 1357 hba->clk_scaling.is_suspended = true; 1358 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1359 1360 __ufshcd_suspend_clkscaling(hba); 1361 } 1362 1363 static void ufshcd_clk_scaling_resume_work(struct work_struct *work) 1364 { 1365 struct ufs_hba *hba = container_of(work, struct ufs_hba, 1366 clk_scaling.resume_work); 1367 unsigned long irq_flags; 1368 1369 spin_lock_irqsave(hba->host->host_lock, irq_flags); 1370 if (!hba->clk_scaling.is_suspended) { 1371 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1372 return; 1373 } 1374 hba->clk_scaling.is_suspended = false; 1375 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1376 1377 devfreq_resume_device(hba->devfreq); 1378 } 1379 1380 static int ufshcd_devfreq_target(struct device *dev, 1381 unsigned long *freq, u32 flags) 1382 { 1383 int ret = 0; 1384 struct ufs_hba *hba = dev_get_drvdata(dev); 1385 ktime_t start; 1386 bool scale_up, sched_clk_scaling_suspend_work = false; 1387 struct list_head *clk_list = &hba->clk_list_head; 1388 struct ufs_clk_info *clki; 1389 unsigned long irq_flags; 1390 1391 if (!ufshcd_is_clkscaling_supported(hba)) 1392 return -EINVAL; 1393 1394 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); 1395 /* Override with the closest supported frequency */ 1396 *freq = (unsigned long) clk_round_rate(clki->clk, *freq); 1397 spin_lock_irqsave(hba->host->host_lock, irq_flags); 1398 if (ufshcd_eh_in_progress(hba)) { 1399 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1400 return 0; 1401 } 1402 1403 if (!hba->clk_scaling.active_reqs) 1404 sched_clk_scaling_suspend_work = true; 1405 1406 if (list_empty(clk_list)) { 1407 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1408 goto out; 1409 } 1410 1411 /* Decide based on the rounded-off frequency and update */ 1412 scale_up = *freq == clki->max_freq; 1413 if (!scale_up) 1414 *freq = clki->min_freq; 1415 /* Update the frequency */ 1416 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { 1417 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1418 ret = 0; 1419 goto out; /* no state change required */ 1420 } 1421 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1422 1423 start = ktime_get(); 1424 ret = ufshcd_devfreq_scale(hba, scale_up); 
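	/* Trace how long the scale operation took, whether or not it succeeded. */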
1425 1426 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), 1427 (scale_up ? "up" : "down"), 1428 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 1429 1430 out: 1431 if (sched_clk_scaling_suspend_work) 1432 queue_work(hba->clk_scaling.workq, 1433 &hba->clk_scaling.suspend_work); 1434 1435 return ret; 1436 } 1437 1438 static int ufshcd_devfreq_get_dev_status(struct device *dev, 1439 struct devfreq_dev_status *stat) 1440 { 1441 struct ufs_hba *hba = dev_get_drvdata(dev); 1442 struct ufs_clk_scaling *scaling = &hba->clk_scaling; 1443 unsigned long flags; 1444 struct list_head *clk_list = &hba->clk_list_head; 1445 struct ufs_clk_info *clki; 1446 ktime_t curr_t; 1447 1448 if (!ufshcd_is_clkscaling_supported(hba)) 1449 return -EINVAL; 1450 1451 memset(stat, 0, sizeof(*stat)); 1452 1453 spin_lock_irqsave(hba->host->host_lock, flags); 1454 curr_t = ktime_get(); 1455 if (!scaling->window_start_t) 1456 goto start_window; 1457 1458 clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1459 /* 1460 * If current frequency is 0, then the ondemand governor considers 1461 * there's no initial frequency set. And it always requests to set 1462 * to max. frequency. 1463 */ 1464 stat->current_frequency = clki->curr_freq; 1465 if (scaling->is_busy_started) 1466 scaling->tot_busy_t += ktime_us_delta(curr_t, 1467 scaling->busy_start_t); 1468 1469 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t); 1470 stat->busy_time = scaling->tot_busy_t; 1471 start_window: 1472 scaling->window_start_t = curr_t; 1473 scaling->tot_busy_t = 0; 1474 1475 if (scaling->active_reqs) { 1476 scaling->busy_start_t = curr_t; 1477 scaling->is_busy_started = true; 1478 } else { 1479 scaling->busy_start_t = 0; 1480 scaling->is_busy_started = false; 1481 } 1482 spin_unlock_irqrestore(hba->host->host_lock, flags); 1483 return 0; 1484 } 1485 1486 static int ufshcd_devfreq_init(struct ufs_hba *hba) 1487 { 1488 struct list_head *clk_list = &hba->clk_list_head; 1489 struct ufs_clk_info *clki; 1490 struct devfreq *devfreq; 1491 int ret; 1492 1493 /* Skip devfreq if we don't have any clocks in the list */ 1494 if (list_empty(clk_list)) 1495 return 0; 1496 1497 clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1498 dev_pm_opp_add(hba->dev, clki->min_freq, 0); 1499 dev_pm_opp_add(hba->dev, clki->max_freq, 0); 1500 1501 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, 1502 &hba->vps->ondemand_data); 1503 devfreq = devfreq_add_device(hba->dev, 1504 &hba->vps->devfreq_profile, 1505 DEVFREQ_GOV_SIMPLE_ONDEMAND, 1506 &hba->vps->ondemand_data); 1507 if (IS_ERR(devfreq)) { 1508 ret = PTR_ERR(devfreq); 1509 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); 1510 1511 dev_pm_opp_remove(hba->dev, clki->min_freq); 1512 dev_pm_opp_remove(hba->dev, clki->max_freq); 1513 return ret; 1514 } 1515 1516 hba->devfreq = devfreq; 1517 1518 return 0; 1519 } 1520 1521 static void ufshcd_devfreq_remove(struct ufs_hba *hba) 1522 { 1523 struct list_head *clk_list = &hba->clk_list_head; 1524 struct ufs_clk_info *clki; 1525 1526 if (!hba->devfreq) 1527 return; 1528 1529 devfreq_remove_device(hba->devfreq); 1530 hba->devfreq = NULL; 1531 1532 clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1533 dev_pm_opp_remove(hba->dev, clki->min_freq); 1534 dev_pm_opp_remove(hba->dev, clki->max_freq); 1535 } 1536 1537 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) 1538 { 1539 unsigned long flags; 1540 1541 devfreq_suspend_device(hba->devfreq); 1542 spin_lock_irqsave(hba->host->host_lock, 
flags); 1543 hba->clk_scaling.window_start_t = 0; 1544 spin_unlock_irqrestore(hba->host->host_lock, flags); 1545 } 1546 1547 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) 1548 { 1549 unsigned long flags; 1550 bool suspend = false; 1551 1552 cancel_work_sync(&hba->clk_scaling.suspend_work); 1553 cancel_work_sync(&hba->clk_scaling.resume_work); 1554 1555 spin_lock_irqsave(hba->host->host_lock, flags); 1556 if (!hba->clk_scaling.is_suspended) { 1557 suspend = true; 1558 hba->clk_scaling.is_suspended = true; 1559 } 1560 spin_unlock_irqrestore(hba->host->host_lock, flags); 1561 1562 if (suspend) 1563 __ufshcd_suspend_clkscaling(hba); 1564 } 1565 1566 static void ufshcd_resume_clkscaling(struct ufs_hba *hba) 1567 { 1568 unsigned long flags; 1569 bool resume = false; 1570 1571 spin_lock_irqsave(hba->host->host_lock, flags); 1572 if (hba->clk_scaling.is_suspended) { 1573 resume = true; 1574 hba->clk_scaling.is_suspended = false; 1575 } 1576 spin_unlock_irqrestore(hba->host->host_lock, flags); 1577 1578 if (resume) 1579 devfreq_resume_device(hba->devfreq); 1580 } 1581 1582 static ssize_t ufshcd_clkscale_enable_show(struct device *dev, 1583 struct device_attribute *attr, char *buf) 1584 { 1585 struct ufs_hba *hba = dev_get_drvdata(dev); 1586 1587 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled); 1588 } 1589 1590 static ssize_t ufshcd_clkscale_enable_store(struct device *dev, 1591 struct device_attribute *attr, const char *buf, size_t count) 1592 { 1593 struct ufs_hba *hba = dev_get_drvdata(dev); 1594 u32 value; 1595 int err = 0; 1596 1597 if (kstrtou32(buf, 0, &value)) 1598 return -EINVAL; 1599 1600 down(&hba->host_sem); 1601 if (!ufshcd_is_user_access_allowed(hba)) { 1602 err = -EBUSY; 1603 goto out; 1604 } 1605 1606 value = !!value; 1607 if (value == hba->clk_scaling.is_enabled) 1608 goto out; 1609 1610 ufshcd_rpm_get_sync(hba); 1611 ufshcd_hold(hba); 1612 1613 hba->clk_scaling.is_enabled = value; 1614 1615 if (value) { 1616 ufshcd_resume_clkscaling(hba); 1617 } else { 1618 ufshcd_suspend_clkscaling(hba); 1619 err = ufshcd_devfreq_scale(hba, true); 1620 if (err) 1621 dev_err(hba->dev, "%s: failed to scale clocks up %d\n", 1622 __func__, err); 1623 } 1624 1625 ufshcd_release(hba); 1626 ufshcd_rpm_put_sync(hba); 1627 out: 1628 up(&hba->host_sem); 1629 return err ? 
err : count; 1630 } 1631 1632 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba) 1633 { 1634 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; 1635 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; 1636 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); 1637 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; 1638 hba->clk_scaling.enable_attr.attr.mode = 0644; 1639 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) 1640 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); 1641 } 1642 1643 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) 1644 { 1645 if (hba->clk_scaling.enable_attr.attr.name) 1646 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); 1647 } 1648 1649 static void ufshcd_init_clk_scaling(struct ufs_hba *hba) 1650 { 1651 char wq_name[sizeof("ufs_clkscaling_00")]; 1652 1653 if (!ufshcd_is_clkscaling_supported(hba)) 1654 return; 1655 1656 if (!hba->clk_scaling.min_gear) 1657 hba->clk_scaling.min_gear = UFS_HS_G1; 1658 1659 INIT_WORK(&hba->clk_scaling.suspend_work, 1660 ufshcd_clk_scaling_suspend_work); 1661 INIT_WORK(&hba->clk_scaling.resume_work, 1662 ufshcd_clk_scaling_resume_work); 1663 1664 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d", 1665 hba->host->host_no); 1666 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); 1667 1668 hba->clk_scaling.is_initialized = true; 1669 } 1670 1671 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) 1672 { 1673 if (!hba->clk_scaling.is_initialized) 1674 return; 1675 1676 ufshcd_remove_clk_scaling_sysfs(hba); 1677 destroy_workqueue(hba->clk_scaling.workq); 1678 ufshcd_devfreq_remove(hba); 1679 hba->clk_scaling.is_initialized = false; 1680 } 1681 1682 static void ufshcd_ungate_work(struct work_struct *work) 1683 { 1684 int ret; 1685 unsigned long flags; 1686 struct ufs_hba *hba = container_of(work, struct ufs_hba, 1687 clk_gating.ungate_work); 1688 1689 cancel_delayed_work_sync(&hba->clk_gating.gate_work); 1690 1691 spin_lock_irqsave(hba->host->host_lock, flags); 1692 if (hba->clk_gating.state == CLKS_ON) { 1693 spin_unlock_irqrestore(hba->host->host_lock, flags); 1694 return; 1695 } 1696 1697 spin_unlock_irqrestore(hba->host->host_lock, flags); 1698 ufshcd_hba_vreg_set_hpm(hba); 1699 ufshcd_setup_clocks(hba, true); 1700 1701 ufshcd_enable_irq(hba); 1702 1703 /* Exit from hibern8 */ 1704 if (ufshcd_can_hibern8_during_gating(hba)) { 1705 /* Prevent gating in this path */ 1706 hba->clk_gating.is_suspended = true; 1707 if (ufshcd_is_link_hibern8(hba)) { 1708 ret = ufshcd_uic_hibern8_exit(hba); 1709 if (ret) 1710 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 1711 __func__, ret); 1712 else 1713 ufshcd_set_link_active(hba); 1714 } 1715 hba->clk_gating.is_suspended = false; 1716 } 1717 } 1718 1719 /** 1720 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release. 1721 * Also, exit from hibern8 mode and set the link as active. 1722 * @hba: per adapter instance 1723 */ 1724 void ufshcd_hold(struct ufs_hba *hba) 1725 { 1726 bool flush_result; 1727 unsigned long flags; 1728 1729 if (!ufshcd_is_clkgating_allowed(hba) || 1730 !hba->clk_gating.is_initialized) 1731 return; 1732 spin_lock_irqsave(hba->host->host_lock, flags); 1733 hba->clk_gating.active_reqs++; 1734 1735 start: 1736 switch (hba->clk_gating.state) { 1737 case CLKS_ON: 1738 /* 1739 * Wait for the ungate work to complete if in progress. 
		 * Though the clocks may be in the ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state in addition to the
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_result = flush_work(&hba->clk_gating.ungate_work);
			if (hba->clk_gating.is_suspended && !flush_result)
				return;
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		fallthrough;
	case CLKS_OFF:
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		fallthrough;
	case REQ_CLKS_ON:
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_gating.gate_work.work);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * If this work races with ufshcd_hold(), the gating state will
	 * already have been changed to REQ_CLKS_ON. In that case save time
	 * by skipping the gating work and exiting after setting the clock
	 * state to CLKS_ON.
1808 */ 1809 if (hba->clk_gating.is_suspended || 1810 (hba->clk_gating.state != REQ_CLKS_OFF)) { 1811 hba->clk_gating.state = CLKS_ON; 1812 trace_ufshcd_clk_gating(dev_name(hba->dev), 1813 hba->clk_gating.state); 1814 goto rel_lock; 1815 } 1816 1817 if (hba->clk_gating.active_reqs 1818 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL 1819 || hba->outstanding_reqs || hba->outstanding_tasks 1820 || hba->active_uic_cmd || hba->uic_async_done) 1821 goto rel_lock; 1822 1823 spin_unlock_irqrestore(hba->host->host_lock, flags); 1824 1825 /* put the link into hibern8 mode before turning off clocks */ 1826 if (ufshcd_can_hibern8_during_gating(hba)) { 1827 ret = ufshcd_uic_hibern8_enter(hba); 1828 if (ret) { 1829 hba->clk_gating.state = CLKS_ON; 1830 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", 1831 __func__, ret); 1832 trace_ufshcd_clk_gating(dev_name(hba->dev), 1833 hba->clk_gating.state); 1834 goto out; 1835 } 1836 ufshcd_set_link_hibern8(hba); 1837 } 1838 1839 ufshcd_disable_irq(hba); 1840 1841 ufshcd_setup_clocks(hba, false); 1842 1843 /* Put the host controller in low power mode if possible */ 1844 ufshcd_hba_vreg_set_lpm(hba); 1845 /* 1846 * In case you are here to cancel this work the gating state 1847 * would be marked as REQ_CLKS_ON. In this case keep the state 1848 * as REQ_CLKS_ON which would anyway imply that clocks are off 1849 * and a request to turn them on is pending. By doing this way, 1850 * we keep the state machine in tact and this would ultimately 1851 * prevent from doing cancel work multiple times when there are 1852 * new requests arriving before the current cancel work is done. 1853 */ 1854 spin_lock_irqsave(hba->host->host_lock, flags); 1855 if (hba->clk_gating.state == REQ_CLKS_OFF) { 1856 hba->clk_gating.state = CLKS_OFF; 1857 trace_ufshcd_clk_gating(dev_name(hba->dev), 1858 hba->clk_gating.state); 1859 } 1860 rel_lock: 1861 spin_unlock_irqrestore(hba->host->host_lock, flags); 1862 out: 1863 return; 1864 } 1865 1866 /* host lock must be held before calling this variant */ 1867 static void __ufshcd_release(struct ufs_hba *hba) 1868 { 1869 if (!ufshcd_is_clkgating_allowed(hba)) 1870 return; 1871 1872 hba->clk_gating.active_reqs--; 1873 1874 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || 1875 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || 1876 hba->outstanding_tasks || !hba->clk_gating.is_initialized || 1877 hba->active_uic_cmd || hba->uic_async_done || 1878 hba->clk_gating.state == CLKS_OFF) 1879 return; 1880 1881 hba->clk_gating.state = REQ_CLKS_OFF; 1882 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); 1883 queue_delayed_work(hba->clk_gating.clk_gating_workq, 1884 &hba->clk_gating.gate_work, 1885 msecs_to_jiffies(hba->clk_gating.delay_ms)); 1886 } 1887 1888 void ufshcd_release(struct ufs_hba *hba) 1889 { 1890 unsigned long flags; 1891 1892 spin_lock_irqsave(hba->host->host_lock, flags); 1893 __ufshcd_release(hba); 1894 spin_unlock_irqrestore(hba->host->host_lock, flags); 1895 } 1896 EXPORT_SYMBOL_GPL(ufshcd_release); 1897 1898 static ssize_t ufshcd_clkgate_delay_show(struct device *dev, 1899 struct device_attribute *attr, char *buf) 1900 { 1901 struct ufs_hba *hba = dev_get_drvdata(dev); 1902 1903 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms); 1904 } 1905 1906 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value) 1907 { 1908 struct ufs_hba *hba = dev_get_drvdata(dev); 1909 unsigned long flags; 1910 1911 spin_lock_irqsave(hba->host->host_lock, flags); 1912 hba->clk_gating.delay_ms = value; 1913 
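	/* The new delay takes effect the next time gate_work is queued by __ufshcd_release(). */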
spin_unlock_irqrestore(hba->host->host_lock, flags); 1914 } 1915 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set); 1916 1917 static ssize_t ufshcd_clkgate_delay_store(struct device *dev, 1918 struct device_attribute *attr, const char *buf, size_t count) 1919 { 1920 unsigned long value; 1921 1922 if (kstrtoul(buf, 0, &value)) 1923 return -EINVAL; 1924 1925 ufshcd_clkgate_delay_set(dev, value); 1926 return count; 1927 } 1928 1929 static ssize_t ufshcd_clkgate_enable_show(struct device *dev, 1930 struct device_attribute *attr, char *buf) 1931 { 1932 struct ufs_hba *hba = dev_get_drvdata(dev); 1933 1934 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled); 1935 } 1936 1937 static ssize_t ufshcd_clkgate_enable_store(struct device *dev, 1938 struct device_attribute *attr, const char *buf, size_t count) 1939 { 1940 struct ufs_hba *hba = dev_get_drvdata(dev); 1941 unsigned long flags; 1942 u32 value; 1943 1944 if (kstrtou32(buf, 0, &value)) 1945 return -EINVAL; 1946 1947 value = !!value; 1948 1949 spin_lock_irqsave(hba->host->host_lock, flags); 1950 if (value == hba->clk_gating.is_enabled) 1951 goto out; 1952 1953 if (value) 1954 __ufshcd_release(hba); 1955 else 1956 hba->clk_gating.active_reqs++; 1957 1958 hba->clk_gating.is_enabled = value; 1959 out: 1960 spin_unlock_irqrestore(hba->host->host_lock, flags); 1961 return count; 1962 } 1963 1964 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba) 1965 { 1966 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; 1967 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; 1968 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); 1969 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; 1970 hba->clk_gating.delay_attr.attr.mode = 0644; 1971 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) 1972 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); 1973 1974 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; 1975 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; 1976 sysfs_attr_init(&hba->clk_gating.enable_attr.attr); 1977 hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; 1978 hba->clk_gating.enable_attr.attr.mode = 0644; 1979 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) 1980 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); 1981 } 1982 1983 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) 1984 { 1985 if (hba->clk_gating.delay_attr.attr.name) 1986 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); 1987 if (hba->clk_gating.enable_attr.attr.name) 1988 device_remove_file(hba->dev, &hba->clk_gating.enable_attr); 1989 } 1990 1991 static void ufshcd_init_clk_gating(struct ufs_hba *hba) 1992 { 1993 char wq_name[sizeof("ufs_clk_gating_00")]; 1994 1995 if (!ufshcd_is_clkgating_allowed(hba)) 1996 return; 1997 1998 hba->clk_gating.state = CLKS_ON; 1999 2000 hba->clk_gating.delay_ms = 150; 2001 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); 2002 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); 2003 2004 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d", 2005 hba->host->host_no); 2006 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, 2007 WQ_MEM_RECLAIM | WQ_HIGHPRI); 2008 2009 ufshcd_init_clk_gating_sysfs(hba); 2010 2011 hba->clk_gating.is_enabled = true; 2012 hba->clk_gating.is_initialized = true; 2013 } 2014 2015 static void ufshcd_exit_clk_gating(struct ufs_hba *hba) 2016 { 2017 if (!hba->clk_gating.is_initialized) 2018 return; 2019 2020 
ufshcd_remove_clk_gating_sysfs(hba); 2021 2022 /* Ungate the clock if necessary. */ 2023 ufshcd_hold(hba); 2024 hba->clk_gating.is_initialized = false; 2025 ufshcd_release(hba); 2026 2027 destroy_workqueue(hba->clk_gating.clk_gating_workq); 2028 } 2029 2030 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) 2031 { 2032 bool queue_resume_work = false; 2033 ktime_t curr_t = ktime_get(); 2034 unsigned long flags; 2035 2036 if (!ufshcd_is_clkscaling_supported(hba)) 2037 return; 2038 2039 spin_lock_irqsave(hba->host->host_lock, flags); 2040 if (!hba->clk_scaling.active_reqs++) 2041 queue_resume_work = true; 2042 2043 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { 2044 spin_unlock_irqrestore(hba->host->host_lock, flags); 2045 return; 2046 } 2047 2048 if (queue_resume_work) 2049 queue_work(hba->clk_scaling.workq, 2050 &hba->clk_scaling.resume_work); 2051 2052 if (!hba->clk_scaling.window_start_t) { 2053 hba->clk_scaling.window_start_t = curr_t; 2054 hba->clk_scaling.tot_busy_t = 0; 2055 hba->clk_scaling.is_busy_started = false; 2056 } 2057 2058 if (!hba->clk_scaling.is_busy_started) { 2059 hba->clk_scaling.busy_start_t = curr_t; 2060 hba->clk_scaling.is_busy_started = true; 2061 } 2062 spin_unlock_irqrestore(hba->host->host_lock, flags); 2063 } 2064 2065 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) 2066 { 2067 struct ufs_clk_scaling *scaling = &hba->clk_scaling; 2068 unsigned long flags; 2069 2070 if (!ufshcd_is_clkscaling_supported(hba)) 2071 return; 2072 2073 spin_lock_irqsave(hba->host->host_lock, flags); 2074 hba->clk_scaling.active_reqs--; 2075 if (!scaling->active_reqs && scaling->is_busy_started) { 2076 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), 2077 scaling->busy_start_t)); 2078 scaling->busy_start_t = 0; 2079 scaling->is_busy_started = false; 2080 } 2081 spin_unlock_irqrestore(hba->host->host_lock, flags); 2082 } 2083 2084 static inline int ufshcd_monitor_opcode2dir(u8 opcode) 2085 { 2086 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16) 2087 return READ; 2088 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16) 2089 return WRITE; 2090 else 2091 return -EINVAL; 2092 } 2093 2094 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, 2095 struct ufshcd_lrb *lrbp) 2096 { 2097 const struct ufs_hba_monitor *m = &hba->monitor; 2098 2099 return (m->enabled && lrbp && lrbp->cmd && 2100 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) && 2101 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); 2102 } 2103 2104 static void ufshcd_start_monitor(struct ufs_hba *hba, 2105 const struct ufshcd_lrb *lrbp) 2106 { 2107 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); 2108 unsigned long flags; 2109 2110 spin_lock_irqsave(hba->host->host_lock, flags); 2111 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) 2112 hba->monitor.busy_start_ts[dir] = ktime_get(); 2113 spin_unlock_irqrestore(hba->host->host_lock, flags); 2114 } 2115 2116 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp) 2117 { 2118 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); 2119 unsigned long flags; 2120 2121 spin_lock_irqsave(hba->host->host_lock, flags); 2122 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { 2123 const struct request *req = scsi_cmd_to_rq(lrbp->cmd); 2124 struct ufs_hba_monitor *m = &hba->monitor; 2125 ktime_t now, inc, lat; 2126 2127 now = lrbp->compl_time_stamp; 2128 inc = ktime_sub(now, m->busy_start_ts[dir]); 2129 m->total_busy[dir] = 
ktime_add(m->total_busy[dir], inc); 2130 m->nr_sec_rw[dir] += blk_rq_sectors(req); 2131 2132 /* Update latencies */ 2133 m->nr_req[dir]++; 2134 lat = ktime_sub(now, lrbp->issue_time_stamp); 2135 m->lat_sum[dir] += lat; 2136 if (m->lat_max[dir] < lat || !m->lat_max[dir]) 2137 m->lat_max[dir] = lat; 2138 if (m->lat_min[dir] > lat || !m->lat_min[dir]) 2139 m->lat_min[dir] = lat; 2140 2141 m->nr_queued[dir]--; 2142 /* Push forward the busy start of monitor */ 2143 m->busy_start_ts[dir] = now; 2144 } 2145 spin_unlock_irqrestore(hba->host->host_lock, flags); 2146 } 2147 2148 /** 2149 * ufshcd_send_command - Send SCSI or device management commands 2150 * @hba: per adapter instance 2151 * @task_tag: Task tag of the command 2152 * @hwq: pointer to hardware queue instance 2153 */ 2154 static inline 2155 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag, 2156 struct ufs_hw_queue *hwq) 2157 { 2158 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; 2159 unsigned long flags; 2160 2161 lrbp->issue_time_stamp = ktime_get(); 2162 lrbp->issue_time_stamp_local_clock = local_clock(); 2163 lrbp->compl_time_stamp = ktime_set(0, 0); 2164 lrbp->compl_time_stamp_local_clock = 0; 2165 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); 2166 ufshcd_clk_scaling_start_busy(hba); 2167 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) 2168 ufshcd_start_monitor(hba, lrbp); 2169 2170 if (is_mcq_enabled(hba)) { 2171 int utrd_size = sizeof(struct utp_transfer_req_desc); 2172 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr; 2173 struct utp_transfer_req_desc *dest = hwq->sqe_base_addr + hwq->sq_tail_slot; 2174 2175 spin_lock(&hwq->sq_lock); 2176 memcpy(dest, src, utrd_size); 2177 ufshcd_inc_sq_tail(hwq); 2178 spin_unlock(&hwq->sq_lock); 2179 } else { 2180 spin_lock_irqsave(&hba->outstanding_lock, flags); 2181 if (hba->vops && hba->vops->setup_xfer_req) 2182 hba->vops->setup_xfer_req(hba, lrbp->task_tag, 2183 !!lrbp->cmd); 2184 __set_bit(lrbp->task_tag, &hba->outstanding_reqs); 2185 ufshcd_writel(hba, 1 << lrbp->task_tag, 2186 REG_UTP_TRANSFER_REQ_DOOR_BELL); 2187 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 2188 } 2189 } 2190 2191 /** 2192 * ufshcd_copy_sense_data - Copy sense data in case of check condition 2193 * @lrbp: pointer to local reference block 2194 */ 2195 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) 2196 { 2197 u8 *const sense_buffer = lrbp->cmd->sense_buffer; 2198 u16 resp_len; 2199 int len; 2200 2201 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length); 2202 if (sense_buffer && resp_len) { 2203 int len_to_copy; 2204 2205 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); 2206 len_to_copy = min_t(int, UFS_SENSE_SIZE, len); 2207 2208 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data, 2209 len_to_copy); 2210 } 2211 } 2212 2213 /** 2214 * ufshcd_copy_query_response() - Copy the Query Response and the data 2215 * descriptor 2216 * @hba: per adapter instance 2217 * @lrbp: pointer to local reference block 2218 * 2219 * Return: 0 upon success; < 0 upon failure. 
2220 */ 2221 static 2222 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 2223 { 2224 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; 2225 2226 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); 2227 2228 /* Get the descriptor */ 2229 if (hba->dev_cmd.query.descriptor && 2230 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { 2231 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + 2232 GENERAL_UPIU_REQUEST_SIZE; 2233 u16 resp_len; 2234 u16 buf_len; 2235 2236 /* data segment length */ 2237 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header 2238 .data_segment_length); 2239 buf_len = be16_to_cpu( 2240 hba->dev_cmd.query.request.upiu_req.length); 2241 if (likely(buf_len >= resp_len)) { 2242 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); 2243 } else { 2244 dev_warn(hba->dev, 2245 "%s: rsp size %d is bigger than buffer size %d", 2246 __func__, resp_len, buf_len); 2247 return -EINVAL; 2248 } 2249 } 2250 2251 return 0; 2252 } 2253 2254 /** 2255 * ufshcd_hba_capabilities - Read controller capabilities 2256 * @hba: per adapter instance 2257 * 2258 * Return: 0 on success, negative on error. 2259 */ 2260 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) 2261 { 2262 int err; 2263 2264 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); 2265 if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) 2266 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; 2267 2268 /* nutrs and nutmrs are 0 based values */ 2269 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; 2270 hba->nutmrs = 2271 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; 2272 hba->reserved_slot = hba->nutrs - 1; 2273 2274 /* Read crypto capabilities */ 2275 err = ufshcd_hba_init_crypto_capabilities(hba); 2276 if (err) { 2277 dev_err(hba->dev, "crypto setup failed\n"); 2278 return err; 2279 } 2280 2281 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities); 2282 if (!hba->mcq_sup) 2283 return 0; 2284 2285 hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP); 2286 hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT, 2287 hba->mcq_capabilities); 2288 2289 return 0; 2290 } 2291 2292 /** 2293 * ufshcd_ready_for_uic_cmd - Check if controller is ready 2294 * to accept UIC commands 2295 * @hba: per adapter instance 2296 * 2297 * Return: true on success, else false. 2298 */ 2299 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) 2300 { 2301 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY; 2302 } 2303 2304 /** 2305 * ufshcd_get_upmcrs - Get the power mode change request status 2306 * @hba: Pointer to adapter instance 2307 * 2308 * This function gets the UPMCRS field of HCS register 2309 * 2310 * Return: value of UPMCRS field. 
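 *
 * Note (added for clarity; based on the UFSHCI register layout): UPMCRS
 * occupies bits 10:8 of the HCS register, which is why the value read in
 * this helper is shifted right by 8 and masked with 0x7.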
2311 */ 2312 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) 2313 { 2314 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; 2315 } 2316 2317 /** 2318 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer 2319 * @hba: per adapter instance 2320 * @uic_cmd: UIC command 2321 */ 2322 static inline void 2323 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) 2324 { 2325 lockdep_assert_held(&hba->uic_cmd_mutex); 2326 2327 WARN_ON(hba->active_uic_cmd); 2328 2329 hba->active_uic_cmd = uic_cmd; 2330 2331 /* Write Args */ 2332 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); 2333 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); 2334 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); 2335 2336 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND); 2337 2338 /* Write UIC Cmd */ 2339 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, 2340 REG_UIC_COMMAND); 2341 } 2342 2343 /** 2344 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command 2345 * @hba: per adapter instance 2346 * @uic_cmd: UIC command 2347 * 2348 * Return: 0 only if success. 2349 */ 2350 static int 2351 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) 2352 { 2353 int ret; 2354 unsigned long flags; 2355 2356 lockdep_assert_held(&hba->uic_cmd_mutex); 2357 2358 if (wait_for_completion_timeout(&uic_cmd->done, 2359 msecs_to_jiffies(UIC_CMD_TIMEOUT))) { 2360 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; 2361 } else { 2362 ret = -ETIMEDOUT; 2363 dev_err(hba->dev, 2364 "uic cmd 0x%x with arg3 0x%x completion timeout\n", 2365 uic_cmd->command, uic_cmd->argument3); 2366 2367 if (!uic_cmd->cmd_active) { 2368 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", 2369 __func__); 2370 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; 2371 } 2372 } 2373 2374 spin_lock_irqsave(hba->host->host_lock, flags); 2375 hba->active_uic_cmd = NULL; 2376 spin_unlock_irqrestore(hba->host->host_lock, flags); 2377 2378 return ret; 2379 } 2380 2381 /** 2382 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result 2383 * @hba: per adapter instance 2384 * @uic_cmd: UIC command 2385 * @completion: initialize the completion only if this is set to true 2386 * 2387 * Return: 0 only if success. 2388 */ 2389 static int 2390 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, 2391 bool completion) 2392 { 2393 lockdep_assert_held(&hba->uic_cmd_mutex); 2394 lockdep_assert_held(hba->host->host_lock); 2395 2396 if (!ufshcd_ready_for_uic_cmd(hba)) { 2397 dev_err(hba->dev, 2398 "Controller not ready to accept UIC commands\n"); 2399 return -EIO; 2400 } 2401 2402 if (completion) 2403 init_completion(&uic_cmd->done); 2404 2405 uic_cmd->cmd_active = 1; 2406 ufshcd_dispatch_uic_cmd(hba, uic_cmd); 2407 2408 return 0; 2409 } 2410 2411 /** 2412 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result 2413 * @hba: per adapter instance 2414 * @uic_cmd: UIC command 2415 * 2416 * Return: 0 only if success. 
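 *
 * Minimal usage sketch (not driver code; assumes an initialized hba and
 * mirrors the DME wrappers further down in this file):
 *
 *	struct uic_command uic_cmd = {0};
 *	int ret;
 *
 *	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);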
2417 */
2418 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2419 {
2420 int ret;
2421 unsigned long flags;
2422
2423 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2424 return 0;
2425
2426 ufshcd_hold(hba);
2427 mutex_lock(&hba->uic_cmd_mutex);
2428 ufshcd_add_delay_before_dme_cmd(hba);
2429
2430 spin_lock_irqsave(hba->host->host_lock, flags);
2431 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2432 spin_unlock_irqrestore(hba->host->host_lock, flags);
2433 if (!ret)
2434 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2435
2436 mutex_unlock(&hba->uic_cmd_mutex);
2437
2438 ufshcd_release(hba);
2439 return ret;
2440 }
2441
2442 /**
2443 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
2444 * @hba: per-adapter instance
2445 * @lrbp: pointer to local reference block
2446 * @sg_entries: The number of sg entries actually used
2447 * @sg_list: Pointer to SG list
2448 */
2449 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2450 struct scatterlist *sg_list)
2451 {
2452 struct ufshcd_sg_entry *prd;
2453 struct scatterlist *sg;
2454 int i;
2455
2456 if (sg_entries) {
2457
2458 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2459 lrbp->utr_descriptor_ptr->prd_table_length =
2460 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
2461 else
2462 lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
2463
2464 prd = lrbp->ucd_prdt_ptr;
2465
2466 for_each_sg(sg_list, sg, sg_entries, i) {
2467 const unsigned int len = sg_dma_len(sg);
2468
2469 /*
2470 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2471 * based value that indicates the length, in bytes, of
2472 * the data block. A maximum of length of 256KB may
2473 * exist for any entry. Bits 1:0 of this field shall be
2474 * 11b to indicate Dword granularity. A value of '3'
2475 * indicates 4 bytes, '7' indicates 8 bytes, etc."
2476 */
2477 WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
2478 prd->size = cpu_to_le32(len - 1);
2479 prd->addr = cpu_to_le64(sg->dma_address);
2480 prd->reserved = 0;
2481 prd = (void *)prd + ufshcd_sg_entry_size(hba);
2482 }
2483 } else {
2484 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2485 }
2486 }
2487
2488 /**
2489 * ufshcd_map_sg - Map scatter-gather list to prdt
2490 * @hba: per adapter instance
2491 * @lrbp: pointer to local reference block
2492 *
2493 * Return: 0 in case of success, non-zero value in case of failure.
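 *
 * Worked example of the DBC encoding performed by ufshcd_sgl_to_prdt() above
 * (a sketch, not driver code): a 4096-byte DMA segment is programmed as
 * prd->size = cpu_to_le32(4096 - 1), i.e. 0xFFF, whose two least significant
 * bits are 11b as required for Dword granularity.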
2494 */
2495 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2496 {
2497 struct scsi_cmnd *cmd = lrbp->cmd;
2498 int sg_segments = scsi_dma_map(cmd);
2499
2500 if (sg_segments < 0)
2501 return sg_segments;
2502
2503 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
2504
2505 return 0;
2506 }
2507
2508 /**
2509 * ufshcd_enable_intr - enable interrupts
2510 * @hba: per adapter instance
2511 * @intrs: interrupt bits
2512 */
2513 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2514 {
2515 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2516
2517 if (hba->ufs_version == ufshci_version(1, 0)) {
2518 u32 rw;
2519 rw = set & INTERRUPT_MASK_RW_VER_10;
2520 set = rw | ((set ^ intrs) & intrs);
2521 } else {
2522 set |= intrs;
2523 }
2524
2525 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2526 }
2527
2528 /**
2529 * ufshcd_disable_intr - disable interrupts
2530 * @hba: per adapter instance
2531 * @intrs: interrupt bits
2532 */
2533 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2534 {
2535 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2536
2537 if (hba->ufs_version == ufshci_version(1, 0)) {
2538 u32 rw;
2539 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2540 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2541 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2542
2543 } else {
2544 set &= ~intrs;
2545 }
2546
2547 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2548 }
2549
2550 /**
2551 * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header
2552 * according to the request descriptor
2553 * @lrbp: pointer to local reference block
2554 * @upiu_flags: flags required in the header
2555 * @cmd_dir: data direction of the request
2556 * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
2557 */
2558 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
2559 enum dma_data_direction cmd_dir, int ehs_length)
2560 {
2561 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2562 struct request_desc_header *h = &req_desc->header;
2563 enum utp_data_direction data_direction;
2564
2565 *h = (typeof(*h)){ };
2566
2567 if (cmd_dir == DMA_FROM_DEVICE) {
2568 data_direction = UTP_DEVICE_TO_HOST;
2569 *upiu_flags = UPIU_CMD_FLAGS_READ;
2570 } else if (cmd_dir == DMA_TO_DEVICE) {
2571 data_direction = UTP_HOST_TO_DEVICE;
2572 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2573 } else {
2574 data_direction = UTP_NO_DATA_TRANSFER;
2575 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2576 }
2577
2578 h->command_type = lrbp->command_type;
2579 h->data_direction = data_direction;
2580 h->ehs_length = ehs_length;
2581
2582 if (lrbp->intr_cmd)
2583 h->interrupt = 1;
2584
2585 /* Prepare crypto related dwords */
2586 ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
2587
2588 /*
2589 * assigning invalid value for command status.
Controller 2590 * updates OCS on command completion, with the command 2591 * status 2592 */ 2593 h->ocs = OCS_INVALID_COMMAND_STATUS; 2594 2595 req_desc->prd_table_length = 0; 2596 } 2597 2598 /** 2599 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc, 2600 * for scsi commands 2601 * @lrbp: local reference block pointer 2602 * @upiu_flags: flags 2603 */ 2604 static 2605 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags) 2606 { 2607 struct scsi_cmnd *cmd = lrbp->cmd; 2608 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2609 unsigned short cdb_len; 2610 2611 ucd_req_ptr->header = (struct utp_upiu_header){ 2612 .transaction_code = UPIU_TRANSACTION_COMMAND, 2613 .flags = upiu_flags, 2614 .lun = lrbp->lun, 2615 .task_tag = lrbp->task_tag, 2616 .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI, 2617 }; 2618 2619 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); 2620 2621 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); 2622 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); 2623 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); 2624 2625 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2626 } 2627 2628 /** 2629 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request 2630 * @hba: UFS hba 2631 * @lrbp: local reference block pointer 2632 * @upiu_flags: flags 2633 */ 2634 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, 2635 struct ufshcd_lrb *lrbp, u8 upiu_flags) 2636 { 2637 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2638 struct ufs_query *query = &hba->dev_cmd.query; 2639 u16 len = be16_to_cpu(query->request.upiu_req.length); 2640 2641 /* Query request header */ 2642 ucd_req_ptr->header = (struct utp_upiu_header){ 2643 .transaction_code = UPIU_TRANSACTION_QUERY_REQ, 2644 .flags = upiu_flags, 2645 .lun = lrbp->lun, 2646 .task_tag = lrbp->task_tag, 2647 .query_function = query->request.query_func, 2648 /* Data segment length only need for WRITE_DESC */ 2649 .data_segment_length = 2650 query->request.upiu_req.opcode == 2651 UPIU_QUERY_OPCODE_WRITE_DESC ? 2652 cpu_to_be16(len) : 2653 0, 2654 }; 2655 2656 /* Copy the Query Request buffer as is */ 2657 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, 2658 QUERY_OSF_SIZE); 2659 2660 /* Copy the Descriptor */ 2661 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) 2662 memcpy(ucd_req_ptr + 1, query->descriptor, len); 2663 2664 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2665 } 2666 2667 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) 2668 { 2669 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2670 2671 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); 2672 2673 ucd_req_ptr->header = (struct utp_upiu_header){ 2674 .transaction_code = UPIU_TRANSACTION_NOP_OUT, 2675 .task_tag = lrbp->task_tag, 2676 }; 2677 2678 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2679 } 2680 2681 /** 2682 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU) 2683 * for Device Management Purposes 2684 * @hba: per adapter instance 2685 * @lrbp: pointer to local reference block 2686 * 2687 * Return: 0 upon success; < 0 upon failure. 
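 *
 * Note (descriptive, based on the callers in this file): this helper is
 * reached through ufshcd_compose_dev_cmd(), which is expected to set
 * hba->dev_cmd.type to DEV_CMD_TYPE_QUERY or DEV_CMD_TYPE_NOP; any other
 * type is rejected with -EINVAL below.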
2688 */ 2689 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, 2690 struct ufshcd_lrb *lrbp) 2691 { 2692 u8 upiu_flags; 2693 int ret = 0; 2694 2695 if (hba->ufs_version <= ufshci_version(1, 1)) 2696 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; 2697 else 2698 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 2699 2700 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0); 2701 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) 2702 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); 2703 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) 2704 ufshcd_prepare_utp_nop_upiu(lrbp); 2705 else 2706 ret = -EINVAL; 2707 2708 return ret; 2709 } 2710 2711 /** 2712 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU) 2713 * for SCSI Purposes 2714 * @hba: per adapter instance 2715 * @lrbp: pointer to local reference block 2716 * 2717 * Return: 0 upon success; < 0 upon failure. 2718 */ 2719 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 2720 { 2721 u8 upiu_flags; 2722 int ret = 0; 2723 2724 if (hba->ufs_version <= ufshci_version(1, 1)) 2725 lrbp->command_type = UTP_CMD_TYPE_SCSI; 2726 else 2727 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 2728 2729 if (likely(lrbp->cmd)) { 2730 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0); 2731 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); 2732 } else { 2733 ret = -EINVAL; 2734 } 2735 2736 return ret; 2737 } 2738 2739 /** 2740 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID 2741 * @upiu_wlun_id: UPIU W-LUN id 2742 * 2743 * Return: SCSI W-LUN id. 2744 */ 2745 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) 2746 { 2747 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; 2748 } 2749 2750 static inline bool is_device_wlun(struct scsi_device *sdev) 2751 { 2752 return sdev->lun == 2753 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN); 2754 } 2755 2756 /* 2757 * Associate the UFS controller queue with the default and poll HCTX types. 2758 * Initialize the mq_map[] arrays. 
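 *
 * In the legacy single-doorbell (non-MCQ) case this leaves exactly one
 * hardware queue: HCTX_TYPE_DEFAULT and HCTX_TYPE_POLL both end up at
 * queue_offset 0, and HCTX_TYPE_READ gets no queues of its own.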
2759 */ 2760 static void ufshcd_map_queues(struct Scsi_Host *shost) 2761 { 2762 struct ufs_hba *hba = shost_priv(shost); 2763 int i, queue_offset = 0; 2764 2765 if (!is_mcq_supported(hba)) { 2766 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1; 2767 hba->nr_queues[HCTX_TYPE_READ] = 0; 2768 hba->nr_queues[HCTX_TYPE_POLL] = 1; 2769 hba->nr_hw_queues = 1; 2770 } 2771 2772 for (i = 0; i < shost->nr_maps; i++) { 2773 struct blk_mq_queue_map *map = &shost->tag_set.map[i]; 2774 2775 map->nr_queues = hba->nr_queues[i]; 2776 if (!map->nr_queues) 2777 continue; 2778 map->queue_offset = queue_offset; 2779 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba)) 2780 map->queue_offset = 0; 2781 2782 blk_mq_map_queues(map); 2783 queue_offset += map->nr_queues; 2784 } 2785 } 2786 2787 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) 2788 { 2789 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + 2790 i * ufshcd_get_ucd_size(hba); 2791 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; 2792 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + 2793 i * ufshcd_get_ucd_size(hba); 2794 u16 response_offset = offsetof(struct utp_transfer_cmd_desc, 2795 response_upiu); 2796 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); 2797 2798 lrb->utr_descriptor_ptr = utrdlp + i; 2799 lrb->utrd_dma_addr = hba->utrdl_dma_addr + 2800 i * sizeof(struct utp_transfer_req_desc); 2801 lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu; 2802 lrb->ucd_req_dma_addr = cmd_desc_element_addr; 2803 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu; 2804 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset; 2805 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table; 2806 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset; 2807 } 2808 2809 /** 2810 * ufshcd_queuecommand - main entry point for SCSI requests 2811 * @host: SCSI host pointer 2812 * @cmd: command from SCSI Midlayer 2813 * 2814 * Return: 0 for success, non-zero in case of failure. 2815 */ 2816 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 2817 { 2818 struct ufs_hba *hba = shost_priv(host); 2819 int tag = scsi_cmd_to_rq(cmd)->tag; 2820 struct ufshcd_lrb *lrbp; 2821 int err = 0; 2822 struct ufs_hw_queue *hwq = NULL; 2823 2824 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag); 2825 2826 switch (hba->ufshcd_state) { 2827 case UFSHCD_STATE_OPERATIONAL: 2828 break; 2829 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: 2830 /* 2831 * SCSI error handler can call ->queuecommand() while UFS error 2832 * handler is in progress. Error interrupts could change the 2833 * state from UFSHCD_STATE_RESET to 2834 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests 2835 * being issued in that case. 2836 */ 2837 if (ufshcd_eh_in_progress(hba)) { 2838 err = SCSI_MLQUEUE_HOST_BUSY; 2839 goto out; 2840 } 2841 break; 2842 case UFSHCD_STATE_EH_SCHEDULED_FATAL: 2843 /* 2844 * pm_runtime_get_sync() is used at error handling preparation 2845 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's 2846 * PM ops, it can never be finished if we let SCSI layer keep 2847 * retrying it, which gets err handler stuck forever. Neither 2848 * can we let the scsi cmd pass through, because UFS is in bad 2849 * state, the scsi cmd may eventually time out, which will get 2850 * err handler blocked for too long. So, just fail the scsi cmd 2851 * sent from PM ops, err handler can recover PM error anyways. 
2852 */ 2853 if (hba->pm_op_in_progress) { 2854 hba->force_reset = true; 2855 set_host_byte(cmd, DID_BAD_TARGET); 2856 scsi_done(cmd); 2857 goto out; 2858 } 2859 fallthrough; 2860 case UFSHCD_STATE_RESET: 2861 err = SCSI_MLQUEUE_HOST_BUSY; 2862 goto out; 2863 case UFSHCD_STATE_ERROR: 2864 set_host_byte(cmd, DID_ERROR); 2865 scsi_done(cmd); 2866 goto out; 2867 } 2868 2869 hba->req_abort_count = 0; 2870 2871 ufshcd_hold(hba); 2872 2873 lrbp = &hba->lrb[tag]; 2874 lrbp->cmd = cmd; 2875 lrbp->task_tag = tag; 2876 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 2877 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); 2878 2879 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); 2880 2881 lrbp->req_abort_skip = false; 2882 2883 ufshcd_comp_scsi_upiu(hba, lrbp); 2884 2885 err = ufshcd_map_sg(hba, lrbp); 2886 if (err) { 2887 ufshcd_release(hba); 2888 goto out; 2889 } 2890 2891 if (is_mcq_enabled(hba)) 2892 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); 2893 2894 ufshcd_send_command(hba, tag, hwq); 2895 2896 out: 2897 if (ufs_trigger_eh()) { 2898 unsigned long flags; 2899 2900 spin_lock_irqsave(hba->host->host_lock, flags); 2901 ufshcd_schedule_eh_work(hba); 2902 spin_unlock_irqrestore(hba->host->host_lock, flags); 2903 } 2904 2905 return err; 2906 } 2907 2908 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, 2909 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) 2910 { 2911 lrbp->cmd = NULL; 2912 lrbp->task_tag = tag; 2913 lrbp->lun = 0; /* device management cmd is not specific to any LUN */ 2914 lrbp->intr_cmd = true; /* No interrupt aggregation */ 2915 ufshcd_prepare_lrbp_crypto(NULL, lrbp); 2916 hba->dev_cmd.type = cmd_type; 2917 2918 return ufshcd_compose_devman_upiu(hba, lrbp); 2919 } 2920 2921 /* 2922 * Check with the block layer if the command is inflight 2923 * @cmd: command to check. 2924 * 2925 * Return: true if command is inflight; false if not. 2926 */ 2927 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) 2928 { 2929 struct request *rq; 2930 2931 if (!cmd) 2932 return false; 2933 2934 rq = scsi_cmd_to_rq(cmd); 2935 if (!blk_mq_request_started(rq)) 2936 return false; 2937 2938 return true; 2939 } 2940 2941 /* 2942 * Clear the pending command in the controller and wait until 2943 * the controller confirms that the command has been cleared. 2944 * @hba: per adapter instance 2945 * @task_tag: The tag number of the command to be cleared. 2946 */ 2947 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) 2948 { 2949 u32 mask = 1U << task_tag; 2950 unsigned long flags; 2951 int err; 2952 2953 if (is_mcq_enabled(hba)) { 2954 /* 2955 * MCQ mode. Clean up the MCQ resources similar to 2956 * what the ufshcd_utrl_clear() does for SDB mode. 2957 */ 2958 err = ufshcd_mcq_sq_cleanup(hba, task_tag); 2959 if (err) { 2960 dev_err(hba->dev, "%s: failed tag=%d. err=%d\n", 2961 __func__, task_tag, err); 2962 return err; 2963 } 2964 return 0; 2965 } 2966 2967 /* clear outstanding transaction before retry */ 2968 spin_lock_irqsave(hba->host->host_lock, flags); 2969 ufshcd_utrl_clear(hba, mask); 2970 spin_unlock_irqrestore(hba->host->host_lock, flags); 2971 2972 /* 2973 * wait for h/w to clear corresponding bit in door-bell. 2974 * max. wait is 1 sec. 
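 * Passing ~mask as the expected value makes ufshcd_wait_for_register()
 * poll until the tag's doorbell bit reads back as 0 (the helper is
 * understood to mask the expected value with the supplied mask before
 * comparing).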
2975 */ 2976 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, 2977 mask, ~mask, 1000, 1000); 2978 } 2979 2980 /** 2981 * ufshcd_dev_cmd_completion() - handles device management command responses 2982 * @hba: per adapter instance 2983 * @lrbp: pointer to local reference block 2984 * 2985 * Return: 0 upon success; < 0 upon failure. 2986 */ 2987 static int 2988 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) 2989 { 2990 enum upiu_response_transaction resp; 2991 int err = 0; 2992 2993 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 2994 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); 2995 2996 switch (resp) { 2997 case UPIU_TRANSACTION_NOP_IN: 2998 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { 2999 err = -EINVAL; 3000 dev_err(hba->dev, "%s: unexpected response %x\n", 3001 __func__, resp); 3002 } 3003 break; 3004 case UPIU_TRANSACTION_QUERY_RSP: { 3005 u8 response = lrbp->ucd_rsp_ptr->header.response; 3006 3007 if (response == 0) 3008 err = ufshcd_copy_query_response(hba, lrbp); 3009 break; 3010 } 3011 case UPIU_TRANSACTION_REJECT_UPIU: 3012 /* TODO: handle Reject UPIU Response */ 3013 err = -EPERM; 3014 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", 3015 __func__); 3016 break; 3017 case UPIU_TRANSACTION_RESPONSE: 3018 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) { 3019 err = -EINVAL; 3020 dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp); 3021 } 3022 break; 3023 default: 3024 err = -EINVAL; 3025 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", 3026 __func__, resp); 3027 break; 3028 } 3029 3030 return err; 3031 } 3032 3033 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, 3034 struct ufshcd_lrb *lrbp, int max_timeout) 3035 { 3036 unsigned long time_left = msecs_to_jiffies(max_timeout); 3037 unsigned long flags; 3038 bool pending; 3039 int err; 3040 3041 retry: 3042 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, 3043 time_left); 3044 3045 if (likely(time_left)) { 3046 /* 3047 * The completion handler called complete() and the caller of 3048 * this function still owns the @lrbp tag so the code below does 3049 * not trigger any race conditions. 3050 */ 3051 hba->dev_cmd.complete = NULL; 3052 err = ufshcd_get_tr_ocs(lrbp, NULL); 3053 if (!err) 3054 err = ufshcd_dev_cmd_completion(hba, lrbp); 3055 } else { 3056 err = -ETIMEDOUT; 3057 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", 3058 __func__, lrbp->task_tag); 3059 3060 /* MCQ mode */ 3061 if (is_mcq_enabled(hba)) { 3062 err = ufshcd_clear_cmd(hba, lrbp->task_tag); 3063 hba->dev_cmd.complete = NULL; 3064 return err; 3065 } 3066 3067 /* SDB mode */ 3068 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) { 3069 /* successfully cleared the command, retry if needed */ 3070 err = -EAGAIN; 3071 /* 3072 * Since clearing the command succeeded we also need to 3073 * clear the task tag bit from the outstanding_reqs 3074 * variable. 3075 */ 3076 spin_lock_irqsave(&hba->outstanding_lock, flags); 3077 pending = test_bit(lrbp->task_tag, 3078 &hba->outstanding_reqs); 3079 if (pending) { 3080 hba->dev_cmd.complete = NULL; 3081 __clear_bit(lrbp->task_tag, 3082 &hba->outstanding_reqs); 3083 } 3084 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 3085 3086 if (!pending) { 3087 /* 3088 * The completion handler ran while we tried to 3089 * clear the command. 
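 * Retrying the wait with a minimal timeout is sufficient here:
 * complete() has already been called, so the wait at the top of this
 * function returns immediately and the normal completion path is taken.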
3090 */ 3091 time_left = 1; 3092 goto retry; 3093 } 3094 } else { 3095 dev_err(hba->dev, "%s: failed to clear tag %d\n", 3096 __func__, lrbp->task_tag); 3097 3098 spin_lock_irqsave(&hba->outstanding_lock, flags); 3099 pending = test_bit(lrbp->task_tag, 3100 &hba->outstanding_reqs); 3101 if (pending) 3102 hba->dev_cmd.complete = NULL; 3103 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 3104 3105 if (!pending) { 3106 /* 3107 * The completion handler ran while we tried to 3108 * clear the command. 3109 */ 3110 time_left = 1; 3111 goto retry; 3112 } 3113 } 3114 } 3115 3116 return err; 3117 } 3118 3119 /** 3120 * ufshcd_exec_dev_cmd - API for sending device management requests 3121 * @hba: UFS hba 3122 * @cmd_type: specifies the type (NOP, Query...) 3123 * @timeout: timeout in milliseconds 3124 * 3125 * Return: 0 upon success; < 0 upon failure. 3126 * 3127 * NOTE: Since there is only one available tag for device management commands, 3128 * it is expected you hold the hba->dev_cmd.lock mutex. 3129 */ 3130 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, 3131 enum dev_cmd_type cmd_type, int timeout) 3132 { 3133 DECLARE_COMPLETION_ONSTACK(wait); 3134 const u32 tag = hba->reserved_slot; 3135 struct ufshcd_lrb *lrbp; 3136 int err; 3137 3138 /* Protects use of hba->reserved_slot. */ 3139 lockdep_assert_held(&hba->dev_cmd.lock); 3140 3141 down_read(&hba->clk_scaling_lock); 3142 3143 lrbp = &hba->lrb[tag]; 3144 lrbp->cmd = NULL; 3145 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); 3146 if (unlikely(err)) 3147 goto out; 3148 3149 hba->dev_cmd.complete = &wait; 3150 3151 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); 3152 3153 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); 3154 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); 3155 ufshcd_add_query_upiu_trace(hba, err ? 
UFS_QUERY_ERR : UFS_QUERY_COMP, 3156 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 3157 3158 out: 3159 up_read(&hba->clk_scaling_lock); 3160 return err; 3161 } 3162 3163 /** 3164 * ufshcd_init_query() - init the query response and request parameters 3165 * @hba: per-adapter instance 3166 * @request: address of the request pointer to be initialized 3167 * @response: address of the response pointer to be initialized 3168 * @opcode: operation to perform 3169 * @idn: flag idn to access 3170 * @index: LU number to access 3171 * @selector: query/flag/descriptor further identification 3172 */ 3173 static inline void ufshcd_init_query(struct ufs_hba *hba, 3174 struct ufs_query_req **request, struct ufs_query_res **response, 3175 enum query_opcode opcode, u8 idn, u8 index, u8 selector) 3176 { 3177 *request = &hba->dev_cmd.query.request; 3178 *response = &hba->dev_cmd.query.response; 3179 memset(*request, 0, sizeof(struct ufs_query_req)); 3180 memset(*response, 0, sizeof(struct ufs_query_res)); 3181 (*request)->upiu_req.opcode = opcode; 3182 (*request)->upiu_req.idn = idn; 3183 (*request)->upiu_req.index = index; 3184 (*request)->upiu_req.selector = selector; 3185 } 3186 3187 static int ufshcd_query_flag_retry(struct ufs_hba *hba, 3188 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) 3189 { 3190 int ret; 3191 int retries; 3192 3193 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { 3194 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res); 3195 if (ret) 3196 dev_dbg(hba->dev, 3197 "%s: failed with error %d, retries %d\n", 3198 __func__, ret, retries); 3199 else 3200 break; 3201 } 3202 3203 if (ret) 3204 dev_err(hba->dev, 3205 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", 3206 __func__, opcode, idn, ret, retries); 3207 return ret; 3208 } 3209 3210 /** 3211 * ufshcd_query_flag() - API function for sending flag query requests 3212 * @hba: per-adapter instance 3213 * @opcode: flag query to perform 3214 * @idn: flag idn to access 3215 * @index: flag index to access 3216 * @flag_res: the flag value after the query request completes 3217 * 3218 * Return: 0 for success, non-zero in case of failure. 
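 *
 * Usage sketch (not driver code; assumes an initialized hba and mirrors how
 * the driver polls fDeviceInit during device initialization):
 *
 *	bool flag_res;
 *	int err;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 *	if (!err && !flag_res)
 *		dev_dbg(hba->dev, "fDeviceInit has been cleared\n");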
3219 */ 3220 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, 3221 enum flag_idn idn, u8 index, bool *flag_res) 3222 { 3223 struct ufs_query_req *request = NULL; 3224 struct ufs_query_res *response = NULL; 3225 int err, selector = 0; 3226 int timeout = QUERY_REQ_TIMEOUT; 3227 3228 BUG_ON(!hba); 3229 3230 ufshcd_hold(hba); 3231 mutex_lock(&hba->dev_cmd.lock); 3232 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 3233 selector); 3234 3235 switch (opcode) { 3236 case UPIU_QUERY_OPCODE_SET_FLAG: 3237 case UPIU_QUERY_OPCODE_CLEAR_FLAG: 3238 case UPIU_QUERY_OPCODE_TOGGLE_FLAG: 3239 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 3240 break; 3241 case UPIU_QUERY_OPCODE_READ_FLAG: 3242 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 3243 if (!flag_res) { 3244 /* No dummy reads */ 3245 dev_err(hba->dev, "%s: Invalid argument for read request\n", 3246 __func__); 3247 err = -EINVAL; 3248 goto out_unlock; 3249 } 3250 break; 3251 default: 3252 dev_err(hba->dev, 3253 "%s: Expected query flag opcode but got = %d\n", 3254 __func__, opcode); 3255 err = -EINVAL; 3256 goto out_unlock; 3257 } 3258 3259 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); 3260 3261 if (err) { 3262 dev_err(hba->dev, 3263 "%s: Sending flag query for idn %d failed, err = %d\n", 3264 __func__, idn, err); 3265 goto out_unlock; 3266 } 3267 3268 if (flag_res) 3269 *flag_res = (be32_to_cpu(response->upiu_res.value) & 3270 MASK_QUERY_UPIU_FLAG_LOC) & 0x1; 3271 3272 out_unlock: 3273 mutex_unlock(&hba->dev_cmd.lock); 3274 ufshcd_release(hba); 3275 return err; 3276 } 3277 3278 /** 3279 * ufshcd_query_attr - API function for sending attribute requests 3280 * @hba: per-adapter instance 3281 * @opcode: attribute opcode 3282 * @idn: attribute idn to access 3283 * @index: index field 3284 * @selector: selector field 3285 * @attr_val: the attribute value after the query request completes 3286 * 3287 * Return: 0 for success, non-zero in case of failure. 
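 *
 * Usage sketch for a read (not driver code; assumes an initialized hba and
 * that QUERY_ATTR_IDN_ACTIVE_ICC_LVL is the attribute idn of interest):
 *
 *	u32 icc_level;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);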
3288 */ 3289 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, 3290 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) 3291 { 3292 struct ufs_query_req *request = NULL; 3293 struct ufs_query_res *response = NULL; 3294 int err; 3295 3296 BUG_ON(!hba); 3297 3298 if (!attr_val) { 3299 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", 3300 __func__, opcode); 3301 return -EINVAL; 3302 } 3303 3304 ufshcd_hold(hba); 3305 3306 mutex_lock(&hba->dev_cmd.lock); 3307 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 3308 selector); 3309 3310 switch (opcode) { 3311 case UPIU_QUERY_OPCODE_WRITE_ATTR: 3312 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 3313 request->upiu_req.value = cpu_to_be32(*attr_val); 3314 break; 3315 case UPIU_QUERY_OPCODE_READ_ATTR: 3316 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 3317 break; 3318 default: 3319 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", 3320 __func__, opcode); 3321 err = -EINVAL; 3322 goto out_unlock; 3323 } 3324 3325 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 3326 3327 if (err) { 3328 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", 3329 __func__, opcode, idn, index, err); 3330 goto out_unlock; 3331 } 3332 3333 *attr_val = be32_to_cpu(response->upiu_res.value); 3334 3335 out_unlock: 3336 mutex_unlock(&hba->dev_cmd.lock); 3337 ufshcd_release(hba); 3338 return err; 3339 } 3340 3341 /** 3342 * ufshcd_query_attr_retry() - API function for sending query 3343 * attribute with retries 3344 * @hba: per-adapter instance 3345 * @opcode: attribute opcode 3346 * @idn: attribute idn to access 3347 * @index: index field 3348 * @selector: selector field 3349 * @attr_val: the attribute value after the query request 3350 * completes 3351 * 3352 * Return: 0 for success, non-zero in case of failure. 
3353 */ 3354 int ufshcd_query_attr_retry(struct ufs_hba *hba, 3355 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, 3356 u32 *attr_val) 3357 { 3358 int ret = 0; 3359 u32 retries; 3360 3361 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { 3362 ret = ufshcd_query_attr(hba, opcode, idn, index, 3363 selector, attr_val); 3364 if (ret) 3365 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", 3366 __func__, ret, retries); 3367 else 3368 break; 3369 } 3370 3371 if (ret) 3372 dev_err(hba->dev, 3373 "%s: query attribute, idn %d, failed with error %d after %d retries\n", 3374 __func__, idn, ret, QUERY_REQ_RETRIES); 3375 return ret; 3376 } 3377 3378 static int __ufshcd_query_descriptor(struct ufs_hba *hba, 3379 enum query_opcode opcode, enum desc_idn idn, u8 index, 3380 u8 selector, u8 *desc_buf, int *buf_len) 3381 { 3382 struct ufs_query_req *request = NULL; 3383 struct ufs_query_res *response = NULL; 3384 int err; 3385 3386 BUG_ON(!hba); 3387 3388 if (!desc_buf) { 3389 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", 3390 __func__, opcode); 3391 return -EINVAL; 3392 } 3393 3394 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { 3395 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", 3396 __func__, *buf_len); 3397 return -EINVAL; 3398 } 3399 3400 ufshcd_hold(hba); 3401 3402 mutex_lock(&hba->dev_cmd.lock); 3403 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 3404 selector); 3405 hba->dev_cmd.query.descriptor = desc_buf; 3406 request->upiu_req.length = cpu_to_be16(*buf_len); 3407 3408 switch (opcode) { 3409 case UPIU_QUERY_OPCODE_WRITE_DESC: 3410 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 3411 break; 3412 case UPIU_QUERY_OPCODE_READ_DESC: 3413 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 3414 break; 3415 default: 3416 dev_err(hba->dev, 3417 "%s: Expected query descriptor opcode but got = 0x%.2x\n", 3418 __func__, opcode); 3419 err = -EINVAL; 3420 goto out_unlock; 3421 } 3422 3423 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 3424 3425 if (err) { 3426 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", 3427 __func__, opcode, idn, index, err); 3428 goto out_unlock; 3429 } 3430 3431 *buf_len = be16_to_cpu(response->upiu_res.length); 3432 3433 out_unlock: 3434 hba->dev_cmd.query.descriptor = NULL; 3435 mutex_unlock(&hba->dev_cmd.lock); 3436 ufshcd_release(hba); 3437 return err; 3438 } 3439 3440 /** 3441 * ufshcd_query_descriptor_retry - API function for sending descriptor requests 3442 * @hba: per-adapter instance 3443 * @opcode: attribute opcode 3444 * @idn: attribute idn to access 3445 * @index: index field 3446 * @selector: selector field 3447 * @desc_buf: the buffer that contains the descriptor 3448 * @buf_len: length parameter passed to the device 3449 * 3450 * The buf_len parameter will contain, on return, the length parameter 3451 * received on the response. 3452 * 3453 * Return: 0 for success, non-zero in case of failure. 
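 *
 * Usage sketch for reading the Device descriptor (not driver code; assumes
 * an initialized hba):
 *
 *	u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *	int buf_len = QUERY_DESC_MAX_SIZE;
 *	int err;
 *
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &buf_len);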
3454 */ 3455 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, 3456 enum query_opcode opcode, 3457 enum desc_idn idn, u8 index, 3458 u8 selector, 3459 u8 *desc_buf, int *buf_len) 3460 { 3461 int err; 3462 int retries; 3463 3464 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { 3465 err = __ufshcd_query_descriptor(hba, opcode, idn, index, 3466 selector, desc_buf, buf_len); 3467 if (!err || err == -EINVAL) 3468 break; 3469 } 3470 3471 return err; 3472 } 3473 3474 /** 3475 * ufshcd_read_desc_param - read the specified descriptor parameter 3476 * @hba: Pointer to adapter instance 3477 * @desc_id: descriptor idn value 3478 * @desc_index: descriptor index 3479 * @param_offset: offset of the parameter to read 3480 * @param_read_buf: pointer to buffer where parameter would be read 3481 * @param_size: sizeof(param_read_buf) 3482 * 3483 * Return: 0 in case of success, non-zero otherwise. 3484 */ 3485 int ufshcd_read_desc_param(struct ufs_hba *hba, 3486 enum desc_idn desc_id, 3487 int desc_index, 3488 u8 param_offset, 3489 u8 *param_read_buf, 3490 u8 param_size) 3491 { 3492 int ret; 3493 u8 *desc_buf; 3494 int buff_len = QUERY_DESC_MAX_SIZE; 3495 bool is_kmalloc = true; 3496 3497 /* Safety check */ 3498 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) 3499 return -EINVAL; 3500 3501 /* Check whether we need temp memory */ 3502 if (param_offset != 0 || param_size < buff_len) { 3503 desc_buf = kzalloc(buff_len, GFP_KERNEL); 3504 if (!desc_buf) 3505 return -ENOMEM; 3506 } else { 3507 desc_buf = param_read_buf; 3508 is_kmalloc = false; 3509 } 3510 3511 /* Request for full descriptor */ 3512 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, 3513 desc_id, desc_index, 0, 3514 desc_buf, &buff_len); 3515 if (ret) { 3516 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n", 3517 __func__, desc_id, desc_index, param_offset, ret); 3518 goto out; 3519 } 3520 3521 /* Update descriptor length */ 3522 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET]; 3523 3524 if (param_offset >= buff_len) { 3525 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", 3526 __func__, param_offset, desc_id, buff_len); 3527 ret = -EINVAL; 3528 goto out; 3529 } 3530 3531 /* Sanity check */ 3532 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { 3533 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", 3534 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); 3535 ret = -EINVAL; 3536 goto out; 3537 } 3538 3539 if (is_kmalloc) { 3540 /* Make sure we don't copy more data than available */ 3541 if (param_offset >= buff_len) 3542 ret = -EINVAL; 3543 else 3544 memcpy(param_read_buf, &desc_buf[param_offset], 3545 min_t(u32, param_size, buff_len - param_offset)); 3546 } 3547 out: 3548 if (is_kmalloc) 3549 kfree(desc_buf); 3550 return ret; 3551 } 3552 3553 /** 3554 * struct uc_string_id - unicode string 3555 * 3556 * @len: size of this descriptor inclusive 3557 * @type: descriptor type 3558 * @uc: unicode string character 3559 */ 3560 struct uc_string_id { 3561 u8 len; 3562 u8 type; 3563 wchar_t uc[]; 3564 } __packed; 3565 3566 /* replace non-printable or non-ASCII characters with spaces */ 3567 static inline char ufshcd_remove_non_printable(u8 ch) 3568 { 3569 return (ch >= 0x20 && ch <= 0x7e) ? 
ch : ' '; 3570 } 3571 3572 /** 3573 * ufshcd_read_string_desc - read string descriptor 3574 * @hba: pointer to adapter instance 3575 * @desc_index: descriptor index 3576 * @buf: pointer to buffer where descriptor would be read, 3577 * the caller should free the memory. 3578 * @ascii: if true convert from unicode to ascii characters 3579 * null terminated string. 3580 * 3581 * Return: 3582 * * string size on success. 3583 * * -ENOMEM: on allocation failure 3584 * * -EINVAL: on a wrong parameter 3585 */ 3586 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, 3587 u8 **buf, bool ascii) 3588 { 3589 struct uc_string_id *uc_str; 3590 u8 *str; 3591 int ret; 3592 3593 if (!buf) 3594 return -EINVAL; 3595 3596 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 3597 if (!uc_str) 3598 return -ENOMEM; 3599 3600 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, 3601 (u8 *)uc_str, QUERY_DESC_MAX_SIZE); 3602 if (ret < 0) { 3603 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", 3604 QUERY_REQ_RETRIES, ret); 3605 str = NULL; 3606 goto out; 3607 } 3608 3609 if (uc_str->len <= QUERY_DESC_HDR_SIZE) { 3610 dev_dbg(hba->dev, "String Desc is of zero length\n"); 3611 str = NULL; 3612 ret = 0; 3613 goto out; 3614 } 3615 3616 if (ascii) { 3617 ssize_t ascii_len; 3618 int i; 3619 /* remove header and divide by 2 to move from UTF16 to UTF8 */ 3620 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; 3621 str = kzalloc(ascii_len, GFP_KERNEL); 3622 if (!str) { 3623 ret = -ENOMEM; 3624 goto out; 3625 } 3626 3627 /* 3628 * the descriptor contains string in UTF16 format 3629 * we need to convert to utf-8 so it can be displayed 3630 */ 3631 ret = utf16s_to_utf8s(uc_str->uc, 3632 uc_str->len - QUERY_DESC_HDR_SIZE, 3633 UTF16_BIG_ENDIAN, str, ascii_len); 3634 3635 /* replace non-printable or non-ASCII characters with spaces */ 3636 for (i = 0; i < ret; i++) 3637 str[i] = ufshcd_remove_non_printable(str[i]); 3638 3639 str[ret++] = '\0'; 3640 3641 } else { 3642 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL); 3643 if (!str) { 3644 ret = -ENOMEM; 3645 goto out; 3646 } 3647 ret = uc_str->len; 3648 } 3649 out: 3650 *buf = str; 3651 kfree(uc_str); 3652 return ret; 3653 } 3654 3655 /** 3656 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter 3657 * @hba: Pointer to adapter instance 3658 * @lun: lun id 3659 * @param_offset: offset of the parameter to read 3660 * @param_read_buf: pointer to buffer where parameter would be read 3661 * @param_size: sizeof(param_read_buf) 3662 * 3663 * Return: 0 in case of success, non-zero otherwise. 3664 */ 3665 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, 3666 int lun, 3667 enum unit_desc_param param_offset, 3668 u8 *param_read_buf, 3669 u32 param_size) 3670 { 3671 /* 3672 * Unit descriptors are only available for general purpose LUs (LUN id 3673 * from 0 to 7) and RPMB Well known LU. 
3674 */ 3675 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) 3676 return -EOPNOTSUPP; 3677 3678 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, 3679 param_offset, param_read_buf, param_size); 3680 } 3681 3682 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba) 3683 { 3684 int err = 0; 3685 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; 3686 3687 if (hba->dev_info.wspecversion >= 0x300) { 3688 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 3689 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0, 3690 &gating_wait); 3691 if (err) 3692 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", 3693 err, gating_wait); 3694 3695 if (gating_wait == 0) { 3696 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; 3697 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", 3698 gating_wait); 3699 } 3700 3701 hba->dev_info.clk_gating_wait_us = gating_wait; 3702 } 3703 3704 return err; 3705 } 3706 3707 /** 3708 * ufshcd_memory_alloc - allocate memory for host memory space data structures 3709 * @hba: per adapter instance 3710 * 3711 * 1. Allocate DMA memory for Command Descriptor array 3712 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT 3713 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). 3714 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List 3715 * (UTMRDL) 3716 * 4. Allocate memory for local reference block(lrb). 3717 * 3718 * Return: 0 for success, non-zero in case of failure. 3719 */ 3720 static int ufshcd_memory_alloc(struct ufs_hba *hba) 3721 { 3722 size_t utmrdl_size, utrdl_size, ucdl_size; 3723 3724 /* Allocate memory for UTP command descriptors */ 3725 ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs; 3726 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, 3727 ucdl_size, 3728 &hba->ucdl_dma_addr, 3729 GFP_KERNEL); 3730 3731 /* 3732 * UFSHCI requires UTP command descriptor to be 128 byte aligned. 3733 */ 3734 if (!hba->ucdl_base_addr || 3735 WARN_ON(hba->ucdl_dma_addr & (128 - 1))) { 3736 dev_err(hba->dev, 3737 "Command Descriptor Memory allocation failed\n"); 3738 goto out; 3739 } 3740 3741 /* 3742 * Allocate memory for UTP Transfer descriptors 3743 * UFSHCI requires 1KB alignment of UTRD 3744 */ 3745 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); 3746 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, 3747 utrdl_size, 3748 &hba->utrdl_dma_addr, 3749 GFP_KERNEL); 3750 if (!hba->utrdl_base_addr || 3751 WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) { 3752 dev_err(hba->dev, 3753 "Transfer Descriptor Memory allocation failed\n"); 3754 goto out; 3755 } 3756 3757 /* 3758 * Skip utmrdl allocation; it may have been 3759 * allocated during first pass and not released during 3760 * MCQ memory allocation. 
3761 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq() 3762 */ 3763 if (hba->utmrdl_base_addr) 3764 goto skip_utmrdl; 3765 /* 3766 * Allocate memory for UTP Task Management descriptors 3767 * UFSHCI requires 1KB alignment of UTMRD 3768 */ 3769 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; 3770 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, 3771 utmrdl_size, 3772 &hba->utmrdl_dma_addr, 3773 GFP_KERNEL); 3774 if (!hba->utmrdl_base_addr || 3775 WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) { 3776 dev_err(hba->dev, 3777 "Task Management Descriptor Memory allocation failed\n"); 3778 goto out; 3779 } 3780 3781 skip_utmrdl: 3782 /* Allocate memory for local reference block */ 3783 hba->lrb = devm_kcalloc(hba->dev, 3784 hba->nutrs, sizeof(struct ufshcd_lrb), 3785 GFP_KERNEL); 3786 if (!hba->lrb) { 3787 dev_err(hba->dev, "LRB Memory allocation failed\n"); 3788 goto out; 3789 } 3790 return 0; 3791 out: 3792 return -ENOMEM; 3793 } 3794 3795 /** 3796 * ufshcd_host_memory_configure - configure local reference block with 3797 * memory offsets 3798 * @hba: per adapter instance 3799 * 3800 * Configure Host memory space 3801 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA 3802 * address. 3803 * 2. Update each UTRD with Response UPIU offset, Response UPIU length 3804 * and PRDT offset. 3805 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT 3806 * into local reference block. 3807 */ 3808 static void ufshcd_host_memory_configure(struct ufs_hba *hba) 3809 { 3810 struct utp_transfer_req_desc *utrdlp; 3811 dma_addr_t cmd_desc_dma_addr; 3812 dma_addr_t cmd_desc_element_addr; 3813 u16 response_offset; 3814 u16 prdt_offset; 3815 int cmd_desc_size; 3816 int i; 3817 3818 utrdlp = hba->utrdl_base_addr; 3819 3820 response_offset = 3821 offsetof(struct utp_transfer_cmd_desc, response_upiu); 3822 prdt_offset = 3823 offsetof(struct utp_transfer_cmd_desc, prd_table); 3824 3825 cmd_desc_size = ufshcd_get_ucd_size(hba); 3826 cmd_desc_dma_addr = hba->ucdl_dma_addr; 3827 3828 for (i = 0; i < hba->nutrs; i++) { 3829 /* Configure UTRD with command descriptor base address */ 3830 cmd_desc_element_addr = 3831 (cmd_desc_dma_addr + (cmd_desc_size * i)); 3832 utrdlp[i].command_desc_base_addr = 3833 cpu_to_le64(cmd_desc_element_addr); 3834 3835 /* Response upiu and prdt offset should be in double words */ 3836 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { 3837 utrdlp[i].response_upiu_offset = 3838 cpu_to_le16(response_offset); 3839 utrdlp[i].prd_table_offset = 3840 cpu_to_le16(prdt_offset); 3841 utrdlp[i].response_upiu_length = 3842 cpu_to_le16(ALIGNED_UPIU_SIZE); 3843 } else { 3844 utrdlp[i].response_upiu_offset = 3845 cpu_to_le16(response_offset >> 2); 3846 utrdlp[i].prd_table_offset = 3847 cpu_to_le16(prdt_offset >> 2); 3848 utrdlp[i].response_upiu_length = 3849 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); 3850 } 3851 3852 ufshcd_init_lrb(hba, &hba->lrb[i], i); 3853 } 3854 } 3855 3856 /** 3857 * ufshcd_dme_link_startup - Notify Unipro to perform link startup 3858 * @hba: per adapter instance 3859 * 3860 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, 3861 * in order to initialize the Unipro link startup procedure. 3862 * Once the Unipro links are up, the device connected to the controller 3863 * is detected. 3864 * 3865 * Return: 0 on success, non-zero value on failure. 
3866 */ 3867 static int ufshcd_dme_link_startup(struct ufs_hba *hba) 3868 { 3869 struct uic_command uic_cmd = {0}; 3870 int ret; 3871 3872 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; 3873 3874 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3875 if (ret) 3876 dev_dbg(hba->dev, 3877 "dme-link-startup: error code %d\n", ret); 3878 return ret; 3879 } 3880 /** 3881 * ufshcd_dme_reset - UIC command for DME_RESET 3882 * @hba: per adapter instance 3883 * 3884 * DME_RESET command is issued in order to reset UniPro stack. 3885 * This function now deals with cold reset. 3886 * 3887 * Return: 0 on success, non-zero value on failure. 3888 */ 3889 static int ufshcd_dme_reset(struct ufs_hba *hba) 3890 { 3891 struct uic_command uic_cmd = {0}; 3892 int ret; 3893 3894 uic_cmd.command = UIC_CMD_DME_RESET; 3895 3896 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3897 if (ret) 3898 dev_err(hba->dev, 3899 "dme-reset: error code %d\n", ret); 3900 3901 return ret; 3902 } 3903 3904 int ufshcd_dme_configure_adapt(struct ufs_hba *hba, 3905 int agreed_gear, 3906 int adapt_val) 3907 { 3908 int ret; 3909 3910 if (agreed_gear < UFS_HS_G4) 3911 adapt_val = PA_NO_ADAPT; 3912 3913 ret = ufshcd_dme_set(hba, 3914 UIC_ARG_MIB(PA_TXHSADAPTTYPE), 3915 adapt_val); 3916 return ret; 3917 } 3918 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt); 3919 3920 /** 3921 * ufshcd_dme_enable - UIC command for DME_ENABLE 3922 * @hba: per adapter instance 3923 * 3924 * DME_ENABLE command is issued in order to enable UniPro stack. 3925 * 3926 * Return: 0 on success, non-zero value on failure. 3927 */ 3928 static int ufshcd_dme_enable(struct ufs_hba *hba) 3929 { 3930 struct uic_command uic_cmd = {0}; 3931 int ret; 3932 3933 uic_cmd.command = UIC_CMD_DME_ENABLE; 3934 3935 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3936 if (ret) 3937 dev_err(hba->dev, 3938 "dme-enable: error code %d\n", ret); 3939 3940 return ret; 3941 } 3942 3943 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) 3944 { 3945 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000 3946 unsigned long min_sleep_time_us; 3947 3948 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) 3949 return; 3950 3951 /* 3952 * last_dme_cmd_tstamp will be 0 only for 1st call to 3953 * this function 3954 */ 3955 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { 3956 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US; 3957 } else { 3958 unsigned long delta = 3959 (unsigned long) ktime_to_us( 3960 ktime_sub(ktime_get(), 3961 hba->last_dme_cmd_tstamp)); 3962 3963 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US) 3964 min_sleep_time_us = 3965 MIN_DELAY_BEFORE_DME_CMDS_US - delta; 3966 else 3967 return; /* no more delay required */ 3968 } 3969 3970 /* allow sleep for extra 50us if needed */ 3971 usleep_range(min_sleep_time_us, min_sleep_time_us + 50); 3972 } 3973 3974 /** 3975 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET 3976 * @hba: per adapter instance 3977 * @attr_sel: uic command argument1 3978 * @attr_set: attribute set type as uic command argument2 3979 * @mib_val: setting value as uic command argument3 3980 * @peer: indicate whether peer or local 3981 * 3982 * Return: 0 on success, non-zero value on failure. 3983 */ 3984 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, 3985 u8 attr_set, u32 mib_val, u8 peer) 3986 { 3987 struct uic_command uic_cmd = {0}; 3988 static const char *const action[] = { 3989 "dme-set", 3990 "dme-peer-set" 3991 }; 3992 const char *set = action[!!peer]; 3993 int ret; 3994 int retries = UFS_UIC_COMMAND_RETRIES; 3995 3996 uic_cmd.command = peer ? 
3997 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; 3998 uic_cmd.argument1 = attr_sel; 3999 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); 4000 uic_cmd.argument3 = mib_val; 4001 4002 do { 4003 /* for peer attributes we retry upon failure */ 4004 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 4005 if (ret) 4006 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", 4007 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); 4008 } while (ret && peer && --retries); 4009 4010 if (ret) 4011 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", 4012 set, UIC_GET_ATTR_ID(attr_sel), mib_val, 4013 UFS_UIC_COMMAND_RETRIES - retries); 4014 4015 return ret; 4016 } 4017 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); 4018 4019 /** 4020 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET 4021 * @hba: per adapter instance 4022 * @attr_sel: uic command argument1 4023 * @mib_val: the value of the attribute as returned by the UIC command 4024 * @peer: indicate whether peer or local 4025 * 4026 * Return: 0 on success, non-zero value on failure. 4027 */ 4028 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, 4029 u32 *mib_val, u8 peer) 4030 { 4031 struct uic_command uic_cmd = {0}; 4032 static const char *const action[] = { 4033 "dme-get", 4034 "dme-peer-get" 4035 }; 4036 const char *get = action[!!peer]; 4037 int ret; 4038 int retries = UFS_UIC_COMMAND_RETRIES; 4039 struct ufs_pa_layer_attr orig_pwr_info; 4040 struct ufs_pa_layer_attr temp_pwr_info; 4041 bool pwr_mode_change = false; 4042 4043 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { 4044 orig_pwr_info = hba->pwr_info; 4045 temp_pwr_info = orig_pwr_info; 4046 4047 if (orig_pwr_info.pwr_tx == FAST_MODE || 4048 orig_pwr_info.pwr_rx == FAST_MODE) { 4049 temp_pwr_info.pwr_tx = FASTAUTO_MODE; 4050 temp_pwr_info.pwr_rx = FASTAUTO_MODE; 4051 pwr_mode_change = true; 4052 } else if (orig_pwr_info.pwr_tx == SLOW_MODE || 4053 orig_pwr_info.pwr_rx == SLOW_MODE) { 4054 temp_pwr_info.pwr_tx = SLOWAUTO_MODE; 4055 temp_pwr_info.pwr_rx = SLOWAUTO_MODE; 4056 pwr_mode_change = true; 4057 } 4058 if (pwr_mode_change) { 4059 ret = ufshcd_change_power_mode(hba, &temp_pwr_info); 4060 if (ret) 4061 goto out; 4062 } 4063 } 4064 4065 uic_cmd.command = peer ? 4066 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; 4067 uic_cmd.argument1 = attr_sel; 4068 4069 do { 4070 /* for peer attributes we retry upon failure */ 4071 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 4072 if (ret) 4073 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", 4074 get, UIC_GET_ATTR_ID(attr_sel), ret); 4075 } while (ret && peer && --retries); 4076 4077 if (ret) 4078 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", 4079 get, UIC_GET_ATTR_ID(attr_sel), 4080 UFS_UIC_COMMAND_RETRIES - retries); 4081 4082 if (mib_val && !ret) 4083 *mib_val = uic_cmd.argument3; 4084 4085 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) 4086 && pwr_mode_change) 4087 ufshcd_change_power_mode(hba, &orig_pwr_info); 4088 out: 4089 return ret; 4090 } 4091 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); 4092 4093 /** 4094 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power 4095 * state) and waits for it to take effect. 
4096  *
4097  * @hba: per adapter instance
4098  * @cmd: UIC command to execute
4099  *
4100  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
4101  * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
4102  * and device UniPro link, and hence their final completion is indicated by
4103  * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
4104  * addition to normal UIC command completion Status (UCCS). This function only
4105  * returns after the relevant status bits indicate the completion.
4106  *
4107  * Return: 0 on success, non-zero value on failure.
4108  */
4109 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4110 {
4111 	DECLARE_COMPLETION_ONSTACK(uic_async_done);
4112 	unsigned long flags;
4113 	u8 status;
4114 	int ret;
4115 	bool reenable_intr = false;
4116
4117 	mutex_lock(&hba->uic_cmd_mutex);
4118 	ufshcd_add_delay_before_dme_cmd(hba);
4119
4120 	spin_lock_irqsave(hba->host->host_lock, flags);
4121 	if (ufshcd_is_link_broken(hba)) {
4122 		ret = -ENOLINK;
4123 		goto out_unlock;
4124 	}
4125 	hba->uic_async_done = &uic_async_done;
4126 	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4127 		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4128 		/*
4129 		 * Make sure UIC command completion interrupt is disabled before
4130 		 * issuing UIC command.
4131 		 */
4132 		wmb();
4133 		reenable_intr = true;
4134 	}
4135 	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4136 	spin_unlock_irqrestore(hba->host->host_lock, flags);
4137 	if (ret) {
4138 		dev_err(hba->dev,
4139 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4140 			cmd->command, cmd->argument3, ret);
4141 		goto out;
4142 	}
4143
4144 	if (!wait_for_completion_timeout(hba->uic_async_done,
4145 					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4146 		dev_err(hba->dev,
4147 			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4148 			cmd->command, cmd->argument3);
4149
4150 		if (!cmd->cmd_active) {
4151 			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4152 				__func__);
4153 			goto check_upmcrs;
4154 		}
4155
4156 		ret = -ETIMEDOUT;
4157 		goto out;
4158 	}
4159
4160 check_upmcrs:
4161 	status = ufshcd_get_upmcrs(hba);
4162 	if (status != PWR_LOCAL) {
4163 		dev_err(hba->dev,
4164 			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4165 			cmd->command, status);
4166 		ret = (status != PWR_OK) ? status : -1;
4167 	}
4168 out:
4169 	if (ret) {
4170 		ufshcd_print_host_state(hba);
4171 		ufshcd_print_pwr_info(hba);
4172 		ufshcd_print_evt_hist(hba);
4173 	}
4174
4175 	spin_lock_irqsave(hba->host->host_lock, flags);
4176 	hba->active_uic_cmd = NULL;
4177 	hba->uic_async_done = NULL;
4178 	if (reenable_intr)
4179 		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4180 	if (ret) {
4181 		ufshcd_set_link_broken(hba);
4182 		ufshcd_schedule_eh_work(hba);
4183 	}
4184 out_unlock:
4185 	spin_unlock_irqrestore(hba->host->host_lock, flags);
4186 	mutex_unlock(&hba->uic_cmd_mutex);
4187
4188 	return ret;
4189 }
4190
4191 /**
4192  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4193  * using DME_SET primitives.
4194  * @hba: per adapter instance
4195  * @mode: power mode value
4196  *
4197  * Return: 0 on success, non-zero value on failure.
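 *
 * Illustrative note (inferred from the caller, not a normative definition):
 * @mode packs the RX power mode in the upper nibble and the TX power mode in
 * the lower nibble; ufshcd_change_power_mode() builds it as
 *
 *	ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 | pwr_mode->pwr_tx);
 *
 * so, for example, (FAST_MODE << 4 | FAST_MODE) requests fast mode in both
 * directions.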
4198 */ 4199 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) 4200 { 4201 struct uic_command uic_cmd = {0}; 4202 int ret; 4203 4204 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { 4205 ret = ufshcd_dme_set(hba, 4206 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1); 4207 if (ret) { 4208 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", 4209 __func__, ret); 4210 goto out; 4211 } 4212 } 4213 4214 uic_cmd.command = UIC_CMD_DME_SET; 4215 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); 4216 uic_cmd.argument3 = mode; 4217 ufshcd_hold(hba); 4218 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4219 ufshcd_release(hba); 4220 4221 out: 4222 return ret; 4223 } 4224 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode); 4225 4226 int ufshcd_link_recovery(struct ufs_hba *hba) 4227 { 4228 int ret; 4229 unsigned long flags; 4230 4231 spin_lock_irqsave(hba->host->host_lock, flags); 4232 hba->ufshcd_state = UFSHCD_STATE_RESET; 4233 ufshcd_set_eh_in_progress(hba); 4234 spin_unlock_irqrestore(hba->host->host_lock, flags); 4235 4236 /* Reset the attached device */ 4237 ufshcd_device_reset(hba); 4238 4239 ret = ufshcd_host_reset_and_restore(hba); 4240 4241 spin_lock_irqsave(hba->host->host_lock, flags); 4242 if (ret) 4243 hba->ufshcd_state = UFSHCD_STATE_ERROR; 4244 ufshcd_clear_eh_in_progress(hba); 4245 spin_unlock_irqrestore(hba->host->host_lock, flags); 4246 4247 if (ret) 4248 dev_err(hba->dev, "%s: link recovery failed, err %d", 4249 __func__, ret); 4250 4251 return ret; 4252 } 4253 EXPORT_SYMBOL_GPL(ufshcd_link_recovery); 4254 4255 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) 4256 { 4257 int ret; 4258 struct uic_command uic_cmd = {0}; 4259 ktime_t start = ktime_get(); 4260 4261 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); 4262 4263 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; 4264 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4265 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", 4266 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 4267 4268 if (ret) 4269 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", 4270 __func__, ret); 4271 else 4272 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, 4273 POST_CHANGE); 4274 4275 return ret; 4276 } 4277 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter); 4278 4279 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) 4280 { 4281 struct uic_command uic_cmd = {0}; 4282 int ret; 4283 ktime_t start = ktime_get(); 4284 4285 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); 4286 4287 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; 4288 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 4289 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", 4290 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 4291 4292 if (ret) { 4293 dev_err(hba->dev, "%s: hibern8 exit failed. 
ret = %d\n", 4294 __func__, ret); 4295 } else { 4296 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, 4297 POST_CHANGE); 4298 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock(); 4299 hba->ufs_stats.hibern8_exit_cnt++; 4300 } 4301 4302 return ret; 4303 } 4304 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit); 4305 4306 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) 4307 { 4308 unsigned long flags; 4309 bool update = false; 4310 4311 if (!ufshcd_is_auto_hibern8_supported(hba)) 4312 return; 4313 4314 spin_lock_irqsave(hba->host->host_lock, flags); 4315 if (hba->ahit != ahit) { 4316 hba->ahit = ahit; 4317 update = true; 4318 } 4319 spin_unlock_irqrestore(hba->host->host_lock, flags); 4320 4321 if (update && 4322 !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) { 4323 ufshcd_rpm_get_sync(hba); 4324 ufshcd_hold(hba); 4325 ufshcd_auto_hibern8_enable(hba); 4326 ufshcd_release(hba); 4327 ufshcd_rpm_put_sync(hba); 4328 } 4329 } 4330 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); 4331 4332 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) 4333 { 4334 if (!ufshcd_is_auto_hibern8_supported(hba)) 4335 return; 4336 4337 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); 4338 } 4339 4340 /** 4341 * ufshcd_init_pwr_info - setting the POR (power on reset) 4342 * values in hba power info 4343 * @hba: per-adapter instance 4344 */ 4345 static void ufshcd_init_pwr_info(struct ufs_hba *hba) 4346 { 4347 hba->pwr_info.gear_rx = UFS_PWM_G1; 4348 hba->pwr_info.gear_tx = UFS_PWM_G1; 4349 hba->pwr_info.lane_rx = 1; 4350 hba->pwr_info.lane_tx = 1; 4351 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; 4352 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; 4353 hba->pwr_info.hs_rate = 0; 4354 } 4355 4356 /** 4357 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device 4358 * @hba: per-adapter instance 4359 * 4360 * Return: 0 upon success; < 0 upon failure. 4361 */ 4362 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) 4363 { 4364 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; 4365 4366 if (hba->max_pwr_info.is_valid) 4367 return 0; 4368 4369 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { 4370 pwr_info->pwr_tx = FASTAUTO_MODE; 4371 pwr_info->pwr_rx = FASTAUTO_MODE; 4372 } else { 4373 pwr_info->pwr_tx = FAST_MODE; 4374 pwr_info->pwr_rx = FAST_MODE; 4375 } 4376 pwr_info->hs_rate = PA_HS_MODE_B; 4377 4378 /* Get the connected lane count */ 4379 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), 4380 &pwr_info->lane_rx); 4381 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 4382 &pwr_info->lane_tx); 4383 4384 if (!pwr_info->lane_rx || !pwr_info->lane_tx) { 4385 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", 4386 __func__, 4387 pwr_info->lane_rx, 4388 pwr_info->lane_tx); 4389 return -EINVAL; 4390 } 4391 4392 /* 4393 * First, get the maximum gears of HS speed. 4394 * If a zero value, it means there is no HSGEAR capability. 4395 * Then, get the maximum gears of PWM speed. 
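 *
 * In outline (a sketch of the code below, where gear/pwr stand in for the
 * pwr_info fields):
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear);
 *	if (!gear) {
 *		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear);
 *		pwr = SLOW_MODE;
 *	}
 *
 * The same pattern is repeated with ufshcd_dme_peer_get() for the TX
 * direction.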
4396 */ 4397 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); 4398 if (!pwr_info->gear_rx) { 4399 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), 4400 &pwr_info->gear_rx); 4401 if (!pwr_info->gear_rx) { 4402 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", 4403 __func__, pwr_info->gear_rx); 4404 return -EINVAL; 4405 } 4406 pwr_info->pwr_rx = SLOW_MODE; 4407 } 4408 4409 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), 4410 &pwr_info->gear_tx); 4411 if (!pwr_info->gear_tx) { 4412 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), 4413 &pwr_info->gear_tx); 4414 if (!pwr_info->gear_tx) { 4415 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", 4416 __func__, pwr_info->gear_tx); 4417 return -EINVAL; 4418 } 4419 pwr_info->pwr_tx = SLOW_MODE; 4420 } 4421 4422 hba->max_pwr_info.is_valid = true; 4423 return 0; 4424 } 4425 4426 static int ufshcd_change_power_mode(struct ufs_hba *hba, 4427 struct ufs_pa_layer_attr *pwr_mode) 4428 { 4429 int ret; 4430 4431 /* if already configured to the requested pwr_mode */ 4432 if (!hba->force_pmc && 4433 pwr_mode->gear_rx == hba->pwr_info.gear_rx && 4434 pwr_mode->gear_tx == hba->pwr_info.gear_tx && 4435 pwr_mode->lane_rx == hba->pwr_info.lane_rx && 4436 pwr_mode->lane_tx == hba->pwr_info.lane_tx && 4437 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && 4438 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && 4439 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { 4440 dev_dbg(hba->dev, "%s: power already configured\n", __func__); 4441 return 0; 4442 } 4443 4444 /* 4445 * Configure attributes for power mode change with below. 4446 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, 4447 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, 4448 * - PA_HSSERIES 4449 */ 4450 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); 4451 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), 4452 pwr_mode->lane_rx); 4453 if (pwr_mode->pwr_rx == FASTAUTO_MODE || 4454 pwr_mode->pwr_rx == FAST_MODE) 4455 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); 4456 else 4457 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false); 4458 4459 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); 4460 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), 4461 pwr_mode->lane_tx); 4462 if (pwr_mode->pwr_tx == FASTAUTO_MODE || 4463 pwr_mode->pwr_tx == FAST_MODE) 4464 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); 4465 else 4466 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false); 4467 4468 if (pwr_mode->pwr_rx == FASTAUTO_MODE || 4469 pwr_mode->pwr_tx == FASTAUTO_MODE || 4470 pwr_mode->pwr_rx == FAST_MODE || 4471 pwr_mode->pwr_tx == FAST_MODE) 4472 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), 4473 pwr_mode->hs_rate); 4474 4475 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { 4476 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 4477 DL_FC0ProtectionTimeOutVal_Default); 4478 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 4479 DL_TC0ReplayTimeOutVal_Default); 4480 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 4481 DL_AFC0ReqTimeOutVal_Default); 4482 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), 4483 DL_FC1ProtectionTimeOutVal_Default); 4484 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), 4485 DL_TC1ReplayTimeOutVal_Default); 4486 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), 4487 DL_AFC1ReqTimeOutVal_Default); 4488 4489 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), 4490 DL_FC0ProtectionTimeOutVal_Default); 4491 
ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), 4492 DL_TC0ReplayTimeOutVal_Default); 4493 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), 4494 DL_AFC0ReqTimeOutVal_Default); 4495 } 4496 4497 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 4498 | pwr_mode->pwr_tx); 4499 4500 if (ret) { 4501 dev_err(hba->dev, 4502 "%s: power mode change failed %d\n", __func__, ret); 4503 } else { 4504 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, 4505 pwr_mode); 4506 4507 memcpy(&hba->pwr_info, pwr_mode, 4508 sizeof(struct ufs_pa_layer_attr)); 4509 } 4510 4511 return ret; 4512 } 4513 4514 /** 4515 * ufshcd_config_pwr_mode - configure a new power mode 4516 * @hba: per-adapter instance 4517 * @desired_pwr_mode: desired power configuration 4518 * 4519 * Return: 0 upon success; < 0 upon failure. 4520 */ 4521 int ufshcd_config_pwr_mode(struct ufs_hba *hba, 4522 struct ufs_pa_layer_attr *desired_pwr_mode) 4523 { 4524 struct ufs_pa_layer_attr final_params = { 0 }; 4525 int ret; 4526 4527 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, 4528 desired_pwr_mode, &final_params); 4529 4530 if (ret) 4531 memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); 4532 4533 ret = ufshcd_change_power_mode(hba, &final_params); 4534 4535 return ret; 4536 } 4537 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); 4538 4539 /** 4540 * ufshcd_complete_dev_init() - checks device readiness 4541 * @hba: per-adapter instance 4542 * 4543 * Set fDeviceInit flag and poll until device toggles it. 4544 * 4545 * Return: 0 upon success; < 0 upon failure. 4546 */ 4547 static int ufshcd_complete_dev_init(struct ufs_hba *hba) 4548 { 4549 int err; 4550 bool flag_res = true; 4551 ktime_t timeout; 4552 4553 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, 4554 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL); 4555 if (err) { 4556 dev_err(hba->dev, 4557 "%s: setting fDeviceInit flag failed with error %d\n", 4558 __func__, err); 4559 goto out; 4560 } 4561 4562 /* Poll fDeviceInit flag to be cleared */ 4563 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT); 4564 do { 4565 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, 4566 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res); 4567 if (!flag_res) 4568 break; 4569 usleep_range(500, 1000); 4570 } while (ktime_before(ktime_get(), timeout)); 4571 4572 if (err) { 4573 dev_err(hba->dev, 4574 "%s: reading fDeviceInit flag failed with error %d\n", 4575 __func__, err); 4576 } else if (flag_res) { 4577 dev_err(hba->dev, 4578 "%s: fDeviceInit was not cleared by the device\n", 4579 __func__); 4580 err = -EBUSY; 4581 } 4582 out: 4583 return err; 4584 } 4585 4586 /** 4587 * ufshcd_make_hba_operational - Make UFS controller operational 4588 * @hba: per adapter instance 4589 * 4590 * To bring UFS host controller to operational state, 4591 * 1. Enable required interrupts 4592 * 2. Configure interrupt aggregation 4593 * 3. Program UTRL and UTMRL base address 4594 * 4. Configure run-stop-registers 4595 * 4596 * Return: 0 on success, non-zero value on failure. 
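 *
 * Typical call site (for illustration): once link startup succeeds, bring-up
 * is finished with
 *
 *	ret = ufshcd_make_hba_operational(hba);
 *
 * as done near the end of ufshcd_link_startup() below.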
4597 */ 4598 int ufshcd_make_hba_operational(struct ufs_hba *hba) 4599 { 4600 int err = 0; 4601 u32 reg; 4602 4603 /* Enable required interrupts */ 4604 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); 4605 4606 /* Configure interrupt aggregation */ 4607 if (ufshcd_is_intr_aggr_allowed(hba)) 4608 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); 4609 else 4610 ufshcd_disable_intr_aggr(hba); 4611 4612 /* Configure UTRL and UTMRL base address registers */ 4613 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 4614 REG_UTP_TRANSFER_REQ_LIST_BASE_L); 4615 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), 4616 REG_UTP_TRANSFER_REQ_LIST_BASE_H); 4617 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), 4618 REG_UTP_TASK_REQ_LIST_BASE_L); 4619 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), 4620 REG_UTP_TASK_REQ_LIST_BASE_H); 4621 4622 /* 4623 * Make sure base address and interrupt setup are updated before 4624 * enabling the run/stop registers below. 4625 */ 4626 wmb(); 4627 4628 /* 4629 * UCRDY, UTMRLDY and UTRLRDY bits must be 1 4630 */ 4631 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); 4632 if (!(ufshcd_get_lists_status(reg))) { 4633 ufshcd_enable_run_stop_reg(hba); 4634 } else { 4635 dev_err(hba->dev, 4636 "Host controller not ready to process requests"); 4637 err = -EIO; 4638 } 4639 4640 return err; 4641 } 4642 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational); 4643 4644 /** 4645 * ufshcd_hba_stop - Send controller to reset state 4646 * @hba: per adapter instance 4647 */ 4648 void ufshcd_hba_stop(struct ufs_hba *hba) 4649 { 4650 unsigned long flags; 4651 int err; 4652 4653 /* 4654 * Obtain the host lock to prevent that the controller is disabled 4655 * while the UFS interrupt handler is active on another CPU. 4656 */ 4657 spin_lock_irqsave(hba->host->host_lock, flags); 4658 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); 4659 spin_unlock_irqrestore(hba->host->host_lock, flags); 4660 4661 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, 4662 CONTROLLER_ENABLE, CONTROLLER_DISABLE, 4663 10, 1); 4664 if (err) 4665 dev_err(hba->dev, "%s: Controller disable failed\n", __func__); 4666 } 4667 EXPORT_SYMBOL_GPL(ufshcd_hba_stop); 4668 4669 /** 4670 * ufshcd_hba_execute_hce - initialize the controller 4671 * @hba: per adapter instance 4672 * 4673 * The controller resets itself and controller firmware initialization 4674 * sequence kicks off. When controller is ready it will set 4675 * the Host Controller Enable bit to 1. 4676 * 4677 * Return: 0 on success, non-zero value on failure. 4678 */ 4679 static int ufshcd_hba_execute_hce(struct ufs_hba *hba) 4680 { 4681 int retry_outer = 3; 4682 int retry_inner; 4683 4684 start: 4685 if (ufshcd_is_hba_active(hba)) 4686 /* change controller state to "reset state" */ 4687 ufshcd_hba_stop(hba); 4688 4689 /* UniPro link is disabled at this point */ 4690 ufshcd_set_link_off(hba); 4691 4692 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); 4693 4694 /* start controller initialization sequence */ 4695 ufshcd_hba_start(hba); 4696 4697 /* 4698 * To initialize a UFS host controller HCE bit must be set to 1. 4699 * During initialization the HCE bit value changes from 1->0->1. 4700 * When the host controller completes initialization sequence 4701 * it sets the value of HCE bit to 1. The same HCE bit is read back 4702 * to check if the controller has completed initialization sequence. 4703 * So without this delay the value HCE = 1, set in the previous 4704 * instruction might be read back. 
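 * A condensed view of the handshake (sketch only):
 *
 *	ufshcd_hba_start(hba);				// set HCE = 1
 *	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
 *	while (!ufshcd_is_hba_active(hba))		// poll until HCE
 *		usleep_range(1000, 1100);		// reads 1 again
 *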
4705 * This delay can be changed based on the controller. 4706 */ 4707 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); 4708 4709 /* wait for the host controller to complete initialization */ 4710 retry_inner = 50; 4711 while (!ufshcd_is_hba_active(hba)) { 4712 if (retry_inner) { 4713 retry_inner--; 4714 } else { 4715 dev_err(hba->dev, 4716 "Controller enable failed\n"); 4717 if (retry_outer) { 4718 retry_outer--; 4719 goto start; 4720 } 4721 return -EIO; 4722 } 4723 usleep_range(1000, 1100); 4724 } 4725 4726 /* enable UIC related interrupts */ 4727 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); 4728 4729 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); 4730 4731 return 0; 4732 } 4733 4734 int ufshcd_hba_enable(struct ufs_hba *hba) 4735 { 4736 int ret; 4737 4738 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { 4739 ufshcd_set_link_off(hba); 4740 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); 4741 4742 /* enable UIC related interrupts */ 4743 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); 4744 ret = ufshcd_dme_reset(hba); 4745 if (ret) { 4746 dev_err(hba->dev, "DME_RESET failed\n"); 4747 return ret; 4748 } 4749 4750 ret = ufshcd_dme_enable(hba); 4751 if (ret) { 4752 dev_err(hba->dev, "Enabling DME failed\n"); 4753 return ret; 4754 } 4755 4756 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); 4757 } else { 4758 ret = ufshcd_hba_execute_hce(hba); 4759 } 4760 4761 return ret; 4762 } 4763 EXPORT_SYMBOL_GPL(ufshcd_hba_enable); 4764 4765 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) 4766 { 4767 int tx_lanes = 0, i, err = 0; 4768 4769 if (!peer) 4770 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 4771 &tx_lanes); 4772 else 4773 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 4774 &tx_lanes); 4775 for (i = 0; i < tx_lanes; i++) { 4776 if (!peer) 4777 err = ufshcd_dme_set(hba, 4778 UIC_ARG_MIB_SEL(TX_LCC_ENABLE, 4779 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), 4780 0); 4781 else 4782 err = ufshcd_dme_peer_set(hba, 4783 UIC_ARG_MIB_SEL(TX_LCC_ENABLE, 4784 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), 4785 0); 4786 if (err) { 4787 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", 4788 __func__, peer, i, err); 4789 break; 4790 } 4791 } 4792 4793 return err; 4794 } 4795 4796 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) 4797 { 4798 return ufshcd_disable_tx_lcc(hba, true); 4799 } 4800 4801 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val) 4802 { 4803 struct ufs_event_hist *e; 4804 4805 if (id >= UFS_EVT_CNT) 4806 return; 4807 4808 e = &hba->ufs_stats.event[id]; 4809 e->val[e->pos] = val; 4810 e->tstamp[e->pos] = local_clock(); 4811 e->cnt += 1; 4812 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH; 4813 4814 ufshcd_vops_event_notify(hba, id, &val); 4815 } 4816 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist); 4817 4818 /** 4819 * ufshcd_link_startup - Initialize unipro link startup 4820 * @hba: per adapter instance 4821 * 4822 * Return: 0 for success, non-zero in case of failure. 4823 */ 4824 static int ufshcd_link_startup(struct ufs_hba *hba) 4825 { 4826 int ret; 4827 int retries = DME_LINKSTARTUP_RETRIES; 4828 bool link_startup_again = false; 4829 4830 /* 4831 * If UFS device isn't active then we will have to issue link startup 4832 * 2 times to make sure the device state move to active. 
4833 */ 4834 if (!ufshcd_is_ufs_dev_active(hba)) 4835 link_startup_again = true; 4836 4837 link_startup: 4838 do { 4839 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); 4840 4841 ret = ufshcd_dme_link_startup(hba); 4842 4843 /* check if device is detected by inter-connect layer */ 4844 if (!ret && !ufshcd_is_device_present(hba)) { 4845 ufshcd_update_evt_hist(hba, 4846 UFS_EVT_LINK_STARTUP_FAIL, 4847 0); 4848 dev_err(hba->dev, "%s: Device not present\n", __func__); 4849 ret = -ENXIO; 4850 goto out; 4851 } 4852 4853 /* 4854 * DME link lost indication is only received when link is up, 4855 * but we can't be sure if the link is up until link startup 4856 * succeeds. So reset the local Uni-Pro and try again. 4857 */ 4858 if (ret && retries && ufshcd_hba_enable(hba)) { 4859 ufshcd_update_evt_hist(hba, 4860 UFS_EVT_LINK_STARTUP_FAIL, 4861 (u32)ret); 4862 goto out; 4863 } 4864 } while (ret && retries--); 4865 4866 if (ret) { 4867 /* failed to get the link up... retire */ 4868 ufshcd_update_evt_hist(hba, 4869 UFS_EVT_LINK_STARTUP_FAIL, 4870 (u32)ret); 4871 goto out; 4872 } 4873 4874 if (link_startup_again) { 4875 link_startup_again = false; 4876 retries = DME_LINKSTARTUP_RETRIES; 4877 goto link_startup; 4878 } 4879 4880 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ 4881 ufshcd_init_pwr_info(hba); 4882 ufshcd_print_pwr_info(hba); 4883 4884 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { 4885 ret = ufshcd_disable_device_tx_lcc(hba); 4886 if (ret) 4887 goto out; 4888 } 4889 4890 /* Include any host controller configuration via UIC commands */ 4891 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); 4892 if (ret) 4893 goto out; 4894 4895 /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */ 4896 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); 4897 ret = ufshcd_make_hba_operational(hba); 4898 out: 4899 if (ret) { 4900 dev_err(hba->dev, "link startup failed %d\n", ret); 4901 ufshcd_print_host_state(hba); 4902 ufshcd_print_pwr_info(hba); 4903 ufshcd_print_evt_hist(hba); 4904 } 4905 return ret; 4906 } 4907 4908 /** 4909 * ufshcd_verify_dev_init() - Verify device initialization 4910 * @hba: per-adapter instance 4911 * 4912 * Send NOP OUT UPIU and wait for NOP IN response to check whether the 4913 * device Transport Protocol (UTP) layer is ready after a reset. 4914 * If the UTP layer at the device side is not initialized, it may 4915 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT 4916 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. 4917 * 4918 * Return: 0 upon success; < 0 upon failure. 
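 *
 * Condensed sketch of the retry loop implemented below (illustration only):
 *
 *	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
 *		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
 *					  hba->nop_out_timeout);
 *		if (!err || err == -ETIMEDOUT)
 *			break;
 *	}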
4919 */ 4920 static int ufshcd_verify_dev_init(struct ufs_hba *hba) 4921 { 4922 int err = 0; 4923 int retries; 4924 4925 ufshcd_hold(hba); 4926 mutex_lock(&hba->dev_cmd.lock); 4927 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { 4928 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, 4929 hba->nop_out_timeout); 4930 4931 if (!err || err == -ETIMEDOUT) 4932 break; 4933 4934 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); 4935 } 4936 mutex_unlock(&hba->dev_cmd.lock); 4937 ufshcd_release(hba); 4938 4939 if (err) 4940 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); 4941 return err; 4942 } 4943 4944 /** 4945 * ufshcd_setup_links - associate link b/w device wlun and other luns 4946 * @sdev: pointer to SCSI device 4947 * @hba: pointer to ufs hba 4948 */ 4949 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) 4950 { 4951 struct device_link *link; 4952 4953 /* 4954 * Device wlun is the supplier & rest of the luns are consumers. 4955 * This ensures that device wlun suspends after all other luns. 4956 */ 4957 if (hba->ufs_device_wlun) { 4958 link = device_link_add(&sdev->sdev_gendev, 4959 &hba->ufs_device_wlun->sdev_gendev, 4960 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); 4961 if (!link) { 4962 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n", 4963 dev_name(&hba->ufs_device_wlun->sdev_gendev)); 4964 return; 4965 } 4966 hba->luns_avail--; 4967 /* Ignore REPORT_LUN wlun probing */ 4968 if (hba->luns_avail == 1) { 4969 ufshcd_rpm_put(hba); 4970 return; 4971 } 4972 } else { 4973 /* 4974 * Device wlun is probed. The assumption is that WLUNs are 4975 * scanned before other LUNs. 4976 */ 4977 hba->luns_avail--; 4978 } 4979 } 4980 4981 /** 4982 * ufshcd_lu_init - Initialize the relevant parameters of the LU 4983 * @hba: per-adapter instance 4984 * @sdev: pointer to SCSI device 4985 */ 4986 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev) 4987 { 4988 int len = QUERY_DESC_MAX_SIZE; 4989 u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); 4990 u8 lun_qdepth = hba->nutrs; 4991 u8 *desc_buf; 4992 int ret; 4993 4994 desc_buf = kzalloc(len, GFP_KERNEL); 4995 if (!desc_buf) 4996 goto set_qdepth; 4997 4998 ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len); 4999 if (ret < 0) { 5000 if (ret == -EOPNOTSUPP) 5001 /* If LU doesn't support unit descriptor, its queue depth is set to 1 */ 5002 lun_qdepth = 1; 5003 kfree(desc_buf); 5004 goto set_qdepth; 5005 } 5006 5007 if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) { 5008 /* 5009 * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will 5010 * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth 5011 */ 5012 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs); 5013 } 5014 /* 5015 * According to UFS device specification, the write protection mode is only supported by 5016 * normal LU, not supported by WLUN. 5017 */ 5018 if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported && 5019 !hba->dev_info.is_lu_power_on_wp && 5020 desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP) 5021 hba->dev_info.is_lu_power_on_wp = true; 5022 5023 /* In case of RPMB LU, check if advanced RPMB mode is enabled */ 5024 if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN && 5025 desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4)) 5026 hba->dev_info.b_advanced_rpmb_en = true; 5027 5028 5029 kfree(desc_buf); 5030 set_qdepth: 5031 /* 5032 * For WLUNs that don't support unit descriptor, queue depth is set to 1. 
For LUs whose 5033 * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue. 5034 */ 5035 dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth); 5036 scsi_change_queue_depth(sdev, lun_qdepth); 5037 } 5038 5039 /** 5040 * ufshcd_slave_alloc - handle initial SCSI device configurations 5041 * @sdev: pointer to SCSI device 5042 * 5043 * Return: success. 5044 */ 5045 static int ufshcd_slave_alloc(struct scsi_device *sdev) 5046 { 5047 struct ufs_hba *hba; 5048 5049 hba = shost_priv(sdev->host); 5050 5051 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ 5052 sdev->use_10_for_ms = 1; 5053 5054 /* DBD field should be set to 1 in mode sense(10) */ 5055 sdev->set_dbd_for_ms = 1; 5056 5057 /* allow SCSI layer to restart the device in case of errors */ 5058 sdev->allow_restart = 1; 5059 5060 /* REPORT SUPPORTED OPERATION CODES is not supported */ 5061 sdev->no_report_opcodes = 1; 5062 5063 /* WRITE_SAME command is not supported */ 5064 sdev->no_write_same = 1; 5065 5066 ufshcd_lu_init(hba, sdev); 5067 5068 ufshcd_setup_links(hba, sdev); 5069 5070 return 0; 5071 } 5072 5073 /** 5074 * ufshcd_change_queue_depth - change queue depth 5075 * @sdev: pointer to SCSI device 5076 * @depth: required depth to set 5077 * 5078 * Change queue depth and make sure the max. limits are not crossed. 5079 * 5080 * Return: new queue depth. 5081 */ 5082 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) 5083 { 5084 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue)); 5085 } 5086 5087 /** 5088 * ufshcd_slave_configure - adjust SCSI device configurations 5089 * @sdev: pointer to SCSI device 5090 * 5091 * Return: 0 (success). 5092 */ 5093 static int ufshcd_slave_configure(struct scsi_device *sdev) 5094 { 5095 struct ufs_hba *hba = shost_priv(sdev->host); 5096 struct request_queue *q = sdev->request_queue; 5097 5098 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); 5099 if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT) 5100 blk_queue_update_dma_alignment(q, SZ_4K - 1); 5101 /* 5102 * Block runtime-pm until all consumers are added. 5103 * Refer ufshcd_setup_links(). 5104 */ 5105 if (is_device_wlun(sdev)) 5106 pm_runtime_get_noresume(&sdev->sdev_gendev); 5107 else if (ufshcd_is_rpm_autosuspend_allowed(hba)) 5108 sdev->rpm_autosuspend = 1; 5109 /* 5110 * Do not print messages during runtime PM to avoid never-ending cycles 5111 * of messages written back to storage by user space causing runtime 5112 * resume, causing more messages and so on. 
5113 */ 5114 sdev->silence_suspend = 1; 5115 5116 ufshcd_crypto_register(hba, q); 5117 5118 return 0; 5119 } 5120 5121 /** 5122 * ufshcd_slave_destroy - remove SCSI device configurations 5123 * @sdev: pointer to SCSI device 5124 */ 5125 static void ufshcd_slave_destroy(struct scsi_device *sdev) 5126 { 5127 struct ufs_hba *hba; 5128 unsigned long flags; 5129 5130 hba = shost_priv(sdev->host); 5131 5132 /* Drop the reference as it won't be needed anymore */ 5133 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { 5134 spin_lock_irqsave(hba->host->host_lock, flags); 5135 hba->ufs_device_wlun = NULL; 5136 spin_unlock_irqrestore(hba->host->host_lock, flags); 5137 } else if (hba->ufs_device_wlun) { 5138 struct device *supplier = NULL; 5139 5140 /* Ensure UFS Device WLUN exists and does not disappear */ 5141 spin_lock_irqsave(hba->host->host_lock, flags); 5142 if (hba->ufs_device_wlun) { 5143 supplier = &hba->ufs_device_wlun->sdev_gendev; 5144 get_device(supplier); 5145 } 5146 spin_unlock_irqrestore(hba->host->host_lock, flags); 5147 5148 if (supplier) { 5149 /* 5150 * If a LUN fails to probe (e.g. absent BOOT WLUN), the 5151 * device will not have been registered but can still 5152 * have a device link holding a reference to the device. 5153 */ 5154 device_link_remove(&sdev->sdev_gendev, supplier); 5155 put_device(supplier); 5156 } 5157 } 5158 } 5159 5160 /** 5161 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status 5162 * @lrbp: pointer to local reference block of completed command 5163 * @scsi_status: SCSI command status 5164 * 5165 * Return: value base on SCSI command status. 5166 */ 5167 static inline int 5168 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) 5169 { 5170 int result = 0; 5171 5172 switch (scsi_status) { 5173 case SAM_STAT_CHECK_CONDITION: 5174 ufshcd_copy_sense_data(lrbp); 5175 fallthrough; 5176 case SAM_STAT_GOOD: 5177 result |= DID_OK << 16 | scsi_status; 5178 break; 5179 case SAM_STAT_TASK_SET_FULL: 5180 case SAM_STAT_BUSY: 5181 case SAM_STAT_TASK_ABORTED: 5182 ufshcd_copy_sense_data(lrbp); 5183 result |= scsi_status; 5184 break; 5185 default: 5186 result |= DID_ERROR << 16; 5187 break; 5188 } /* end of switch */ 5189 5190 return result; 5191 } 5192 5193 /** 5194 * ufshcd_transfer_rsp_status - Get overall status of the response 5195 * @hba: per adapter instance 5196 * @lrbp: pointer to local reference block of completed command 5197 * @cqe: pointer to the completion queue entry 5198 * 5199 * Return: result of the command to notify SCSI midlayer. 5200 */ 5201 static inline int 5202 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, 5203 struct cq_entry *cqe) 5204 { 5205 int result = 0; 5206 int scsi_status; 5207 enum utp_ocs ocs; 5208 u8 upiu_flags; 5209 u32 resid; 5210 5211 upiu_flags = lrbp->ucd_rsp_ptr->header.flags; 5212 resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count); 5213 /* 5214 * Test !overflow instead of underflow to support UFS devices that do 5215 * not set either flag. 
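 *
 * Example (for illustration): a device returning a short read may report a
 * non-zero residual_transfer_count without setting the UNDERFLOW flag;
 * testing !UPIU_RSP_FLAG_OVERFLOW still propagates that residual via
 * scsi_set_resid(), while a response flagged as OVERFLOW is ignored as before.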
5216 */ 5217 if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW)) 5218 scsi_set_resid(lrbp->cmd, resid); 5219 5220 /* overall command status of utrd */ 5221 ocs = ufshcd_get_tr_ocs(lrbp, cqe); 5222 5223 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { 5224 if (lrbp->ucd_rsp_ptr->header.response || 5225 lrbp->ucd_rsp_ptr->header.status) 5226 ocs = OCS_SUCCESS; 5227 } 5228 5229 switch (ocs) { 5230 case OCS_SUCCESS: 5231 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 5232 switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) { 5233 case UPIU_TRANSACTION_RESPONSE: 5234 /* 5235 * get the result based on SCSI status response 5236 * to notify the SCSI midlayer of the command status 5237 */ 5238 scsi_status = lrbp->ucd_rsp_ptr->header.status; 5239 result = ufshcd_scsi_cmd_status(lrbp, scsi_status); 5240 5241 /* 5242 * Currently we are only supporting BKOPs exception 5243 * events hence we can ignore BKOPs exception event 5244 * during power management callbacks. BKOPs exception 5245 * event is not expected to be raised in runtime suspend 5246 * callback as it allows the urgent bkops. 5247 * During system suspend, we are anyway forcefully 5248 * disabling the bkops and if urgent bkops is needed 5249 * it will be enabled on system resume. Long term 5250 * solution could be to abort the system suspend if 5251 * UFS device needs urgent BKOPs. 5252 */ 5253 if (!hba->pm_op_in_progress && 5254 !ufshcd_eh_in_progress(hba) && 5255 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) 5256 /* Flushed in suspend */ 5257 schedule_work(&hba->eeh_work); 5258 break; 5259 case UPIU_TRANSACTION_REJECT_UPIU: 5260 /* TODO: handle Reject UPIU Response */ 5261 result = DID_ERROR << 16; 5262 dev_err(hba->dev, 5263 "Reject UPIU not fully implemented\n"); 5264 break; 5265 default: 5266 dev_err(hba->dev, 5267 "Unexpected request response code = %x\n", 5268 result); 5269 result = DID_ERROR << 16; 5270 break; 5271 } 5272 break; 5273 case OCS_ABORTED: 5274 result |= DID_ABORT << 16; 5275 break; 5276 case OCS_INVALID_COMMAND_STATUS: 5277 result |= DID_REQUEUE << 16; 5278 break; 5279 case OCS_INVALID_CMD_TABLE_ATTR: 5280 case OCS_INVALID_PRDT_ATTR: 5281 case OCS_MISMATCH_DATA_BUF_SIZE: 5282 case OCS_MISMATCH_RESP_UPIU_SIZE: 5283 case OCS_PEER_COMM_FAILURE: 5284 case OCS_FATAL_ERROR: 5285 case OCS_DEVICE_FATAL_ERROR: 5286 case OCS_INVALID_CRYPTO_CONFIG: 5287 case OCS_GENERAL_CRYPTO_ERROR: 5288 default: 5289 result |= DID_ERROR << 16; 5290 dev_err(hba->dev, 5291 "OCS error from controller = %x for tag %d\n", 5292 ocs, lrbp->task_tag); 5293 ufshcd_print_evt_hist(hba); 5294 ufshcd_print_host_state(hba); 5295 break; 5296 } /* end of switch */ 5297 5298 if ((host_byte(result) != DID_OK) && 5299 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) 5300 ufshcd_print_tr(hba, lrbp->task_tag, true); 5301 return result; 5302 } 5303 5304 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, 5305 u32 intr_mask) 5306 { 5307 if (!ufshcd_is_auto_hibern8_supported(hba) || 5308 !ufshcd_is_auto_hibern8_enabled(hba)) 5309 return false; 5310 5311 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK)) 5312 return false; 5313 5314 if (hba->active_uic_cmd && 5315 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || 5316 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) 5317 return false; 5318 5319 return true; 5320 } 5321 5322 /** 5323 * ufshcd_uic_cmd_compl - handle completion of uic command 5324 * @hba: per adapter instance 5325 * @intr_status: interrupt status generated by the controller 5326 * 5327 * Return: 5328 * 
IRQ_HANDLED - If interrupt is valid 5329 * IRQ_NONE - If invalid interrupt 5330 */ 5331 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) 5332 { 5333 irqreturn_t retval = IRQ_NONE; 5334 5335 spin_lock(hba->host->host_lock); 5336 if (ufshcd_is_auto_hibern8_error(hba, intr_status)) 5337 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); 5338 5339 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { 5340 hba->active_uic_cmd->argument2 |= 5341 ufshcd_get_uic_cmd_result(hba); 5342 hba->active_uic_cmd->argument3 = 5343 ufshcd_get_dme_attr_val(hba); 5344 if (!hba->uic_async_done) 5345 hba->active_uic_cmd->cmd_active = 0; 5346 complete(&hba->active_uic_cmd->done); 5347 retval = IRQ_HANDLED; 5348 } 5349 5350 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { 5351 hba->active_uic_cmd->cmd_active = 0; 5352 complete(hba->uic_async_done); 5353 retval = IRQ_HANDLED; 5354 } 5355 5356 if (retval == IRQ_HANDLED) 5357 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, 5358 UFS_CMD_COMP); 5359 spin_unlock(hba->host->host_lock); 5360 return retval; 5361 } 5362 5363 /* Release the resources allocated for processing a SCSI command. */ 5364 void ufshcd_release_scsi_cmd(struct ufs_hba *hba, 5365 struct ufshcd_lrb *lrbp) 5366 { 5367 struct scsi_cmnd *cmd = lrbp->cmd; 5368 5369 scsi_dma_unmap(cmd); 5370 ufshcd_release(hba); 5371 ufshcd_clk_scaling_update_busy(hba); 5372 } 5373 5374 /** 5375 * ufshcd_compl_one_cqe - handle a completion queue entry 5376 * @hba: per adapter instance 5377 * @task_tag: the task tag of the request to be completed 5378 * @cqe: pointer to the completion queue entry 5379 */ 5380 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, 5381 struct cq_entry *cqe) 5382 { 5383 struct ufshcd_lrb *lrbp; 5384 struct scsi_cmnd *cmd; 5385 enum utp_ocs ocs; 5386 5387 lrbp = &hba->lrb[task_tag]; 5388 lrbp->compl_time_stamp = ktime_get(); 5389 cmd = lrbp->cmd; 5390 if (cmd) { 5391 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) 5392 ufshcd_update_monitor(hba, lrbp); 5393 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP); 5394 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe); 5395 ufshcd_release_scsi_cmd(hba, lrbp); 5396 /* Do not touch lrbp after scsi done */ 5397 scsi_done(cmd); 5398 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || 5399 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { 5400 if (hba->dev_cmd.complete) { 5401 if (cqe) { 5402 ocs = le32_to_cpu(cqe->status) & MASK_OCS; 5403 lrbp->utr_descriptor_ptr->header.ocs = ocs; 5404 } 5405 complete(hba->dev_cmd.complete); 5406 ufshcd_clk_scaling_update_busy(hba); 5407 } 5408 } 5409 } 5410 5411 /** 5412 * __ufshcd_transfer_req_compl - handle SCSI and query command completion 5413 * @hba: per adapter instance 5414 * @completed_reqs: bitmask that indicates which requests to complete 5415 */ 5416 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, 5417 unsigned long completed_reqs) 5418 { 5419 int tag; 5420 5421 for_each_set_bit(tag, &completed_reqs, hba->nutrs) 5422 ufshcd_compl_one_cqe(hba, tag, NULL); 5423 } 5424 5425 /* Any value that is not an existing queue number is fine for this constant. 
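 * Callers such as ufshcd_transfer_req_compl() pass it as the queue number so
 * that ufshcd_poll() can tell it is running from interrupt context and must
 * not complete REQ_POLLED requests there.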
*/ 5426 enum { 5427 UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1 5428 }; 5429 5430 static void ufshcd_clear_polled(struct ufs_hba *hba, 5431 unsigned long *completed_reqs) 5432 { 5433 int tag; 5434 5435 for_each_set_bit(tag, completed_reqs, hba->nutrs) { 5436 struct scsi_cmnd *cmd = hba->lrb[tag].cmd; 5437 5438 if (!cmd) 5439 continue; 5440 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED) 5441 __clear_bit(tag, completed_reqs); 5442 } 5443 } 5444 5445 /* 5446 * Return: > 0 if one or more commands have been completed or 0 if no 5447 * requests have been completed. 5448 */ 5449 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) 5450 { 5451 struct ufs_hba *hba = shost_priv(shost); 5452 unsigned long completed_reqs, flags; 5453 u32 tr_doorbell; 5454 struct ufs_hw_queue *hwq; 5455 5456 if (is_mcq_enabled(hba)) { 5457 hwq = &hba->uhq[queue_num]; 5458 5459 return ufshcd_mcq_poll_cqe_lock(hba, hwq); 5460 } 5461 5462 spin_lock_irqsave(&hba->outstanding_lock, flags); 5463 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 5464 completed_reqs = ~tr_doorbell & hba->outstanding_reqs; 5465 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, 5466 "completed: %#lx; outstanding: %#lx\n", completed_reqs, 5467 hba->outstanding_reqs); 5468 if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) { 5469 /* Do not complete polled requests from interrupt context. */ 5470 ufshcd_clear_polled(hba, &completed_reqs); 5471 } 5472 hba->outstanding_reqs &= ~completed_reqs; 5473 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 5474 5475 if (completed_reqs) 5476 __ufshcd_transfer_req_compl(hba, completed_reqs); 5477 5478 return completed_reqs != 0; 5479 } 5480 5481 /** 5482 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is 5483 * invoked from the error handler context or ufshcd_host_reset_and_restore() 5484 * to complete the pending transfers and free the resources associated with 5485 * the scsi command. 5486 * 5487 * @hba: per adapter instance 5488 * @force_compl: This flag is set to true when invoked 5489 * from ufshcd_host_reset_and_restore() in which case it requires special 5490 * handling because the host controller has been reset by ufshcd_hba_stop(). 5491 */ 5492 static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba, 5493 bool force_compl) 5494 { 5495 struct ufs_hw_queue *hwq; 5496 struct ufshcd_lrb *lrbp; 5497 struct scsi_cmnd *cmd; 5498 unsigned long flags; 5499 u32 hwq_num, utag; 5500 int tag; 5501 5502 for (tag = 0; tag < hba->nutrs; tag++) { 5503 lrbp = &hba->lrb[tag]; 5504 cmd = lrbp->cmd; 5505 if (!ufshcd_cmd_inflight(cmd) || 5506 test_bit(SCMD_STATE_COMPLETE, &cmd->state)) 5507 continue; 5508 5509 utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd)); 5510 hwq_num = blk_mq_unique_tag_to_hwq(utag); 5511 hwq = &hba->uhq[hwq_num]; 5512 5513 if (force_compl) { 5514 ufshcd_mcq_compl_all_cqes_lock(hba, hwq); 5515 /* 5516 * For those cmds of which the cqes are not present 5517 * in the cq, complete them explicitly. 
5518 */ 5519 if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) { 5520 spin_lock_irqsave(&hwq->cq_lock, flags); 5521 set_host_byte(cmd, DID_REQUEUE); 5522 ufshcd_release_scsi_cmd(hba, lrbp); 5523 scsi_done(cmd); 5524 spin_unlock_irqrestore(&hwq->cq_lock, flags); 5525 } 5526 } else { 5527 ufshcd_mcq_poll_cqe_lock(hba, hwq); 5528 } 5529 } 5530 } 5531 5532 /** 5533 * ufshcd_transfer_req_compl - handle SCSI and query command completion 5534 * @hba: per adapter instance 5535 * 5536 * Return: 5537 * IRQ_HANDLED - If interrupt is valid 5538 * IRQ_NONE - If invalid interrupt 5539 */ 5540 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) 5541 { 5542 /* Resetting interrupt aggregation counters first and reading the 5543 * DOOR_BELL afterward allows us to handle all the completed requests. 5544 * In order to prevent other interrupts starvation the DB is read once 5545 * after reset. The down side of this solution is the possibility of 5546 * false interrupt if device completes another request after resetting 5547 * aggregation and before reading the DB. 5548 */ 5549 if (ufshcd_is_intr_aggr_allowed(hba) && 5550 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) 5551 ufshcd_reset_intr_aggr(hba); 5552 5553 if (ufs_fail_completion()) 5554 return IRQ_HANDLED; 5555 5556 /* 5557 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we 5558 * do not want polling to trigger spurious interrupt complaints. 5559 */ 5560 ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT); 5561 5562 return IRQ_HANDLED; 5563 } 5564 5565 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask) 5566 { 5567 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 5568 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 5569 &ee_ctrl_mask); 5570 } 5571 5572 int ufshcd_write_ee_control(struct ufs_hba *hba) 5573 { 5574 int err; 5575 5576 mutex_lock(&hba->ee_ctrl_mutex); 5577 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask); 5578 mutex_unlock(&hba->ee_ctrl_mutex); 5579 if (err) 5580 dev_err(hba->dev, "%s: failed to write ee control %d\n", 5581 __func__, err); 5582 return err; 5583 } 5584 5585 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, 5586 const u16 *other_mask, u16 set, u16 clr) 5587 { 5588 u16 new_mask, ee_ctrl_mask; 5589 int err = 0; 5590 5591 mutex_lock(&hba->ee_ctrl_mutex); 5592 new_mask = (*mask & ~clr) | set; 5593 ee_ctrl_mask = new_mask | *other_mask; 5594 if (ee_ctrl_mask != hba->ee_ctrl_mask) 5595 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask); 5596 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */ 5597 if (!err) { 5598 hba->ee_ctrl_mask = ee_ctrl_mask; 5599 *mask = new_mask; 5600 } 5601 mutex_unlock(&hba->ee_ctrl_mutex); 5602 return err; 5603 } 5604 5605 /** 5606 * ufshcd_disable_ee - disable exception event 5607 * @hba: per-adapter instance 5608 * @mask: exception event to disable 5609 * 5610 * Disables exception event in the device so that the EVENT_ALERT 5611 * bit is not set. 5612 * 5613 * Return: zero on success, non-zero error value on failure. 5614 */ 5615 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) 5616 { 5617 return ufshcd_update_ee_drv_mask(hba, 0, mask); 5618 } 5619 5620 /** 5621 * ufshcd_enable_ee - enable exception event 5622 * @hba: per-adapter instance 5623 * @mask: exception event to enable 5624 * 5625 * Enable corresponding exception event in the device to allow 5626 * device to alert host in critical scenarios. 5627 * 5628 * Return: zero on success, non-zero error value on failure. 
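 *
 * Illustrative usage (taken from a caller in this file): before clearing
 * fBackgroundOpsEn, ufshcd_disable_auto_bkops() re-arms the urgent BKOPS
 * exception with
 *
 *	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);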
5629 */ 5630 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) 5631 { 5632 return ufshcd_update_ee_drv_mask(hba, mask, 0); 5633 } 5634 5635 /** 5636 * ufshcd_enable_auto_bkops - Allow device managed BKOPS 5637 * @hba: per-adapter instance 5638 * 5639 * Allow device to manage background operations on its own. Enabling 5640 * this might lead to inconsistent latencies during normal data transfers 5641 * as the device is allowed to manage its own way of handling background 5642 * operations. 5643 * 5644 * Return: zero on success, non-zero on failure. 5645 */ 5646 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) 5647 { 5648 int err = 0; 5649 5650 if (hba->auto_bkops_enabled) 5651 goto out; 5652 5653 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, 5654 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL); 5655 if (err) { 5656 dev_err(hba->dev, "%s: failed to enable bkops %d\n", 5657 __func__, err); 5658 goto out; 5659 } 5660 5661 hba->auto_bkops_enabled = true; 5662 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); 5663 5664 /* No need of URGENT_BKOPS exception from the device */ 5665 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); 5666 if (err) 5667 dev_err(hba->dev, "%s: failed to disable exception event %d\n", 5668 __func__, err); 5669 out: 5670 return err; 5671 } 5672 5673 /** 5674 * ufshcd_disable_auto_bkops - block device in doing background operations 5675 * @hba: per-adapter instance 5676 * 5677 * Disabling background operations improves command response latency but 5678 * has drawback of device moving into critical state where the device is 5679 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the 5680 * host is idle so that BKOPS are managed effectively without any negative 5681 * impacts. 5682 * 5683 * Return: zero on success, non-zero on failure. 5684 */ 5685 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) 5686 { 5687 int err = 0; 5688 5689 if (!hba->auto_bkops_enabled) 5690 goto out; 5691 5692 /* 5693 * If host assisted BKOPs is to be enabled, make sure 5694 * urgent bkops exception is allowed. 5695 */ 5696 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); 5697 if (err) { 5698 dev_err(hba->dev, "%s: failed to enable exception event %d\n", 5699 __func__, err); 5700 goto out; 5701 } 5702 5703 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, 5704 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL); 5705 if (err) { 5706 dev_err(hba->dev, "%s: failed to disable bkops %d\n", 5707 __func__, err); 5708 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); 5709 goto out; 5710 } 5711 5712 hba->auto_bkops_enabled = false; 5713 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); 5714 hba->is_urgent_bkops_lvl_checked = false; 5715 out: 5716 return err; 5717 } 5718 5719 /** 5720 * ufshcd_force_reset_auto_bkops - force reset auto bkops state 5721 * @hba: per adapter instance 5722 * 5723 * After a device reset the device may toggle the BKOPS_EN flag 5724 * to default value. The s/w tracking variables should be updated 5725 * as well. This function would change the auto-bkops state based on 5726 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND. 
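 *
 * In short: when the capability is set, auto-bkops is forced back on via
 * ufshcd_enable_auto_bkops(); otherwise it is forced off via
 * ufshcd_disable_auto_bkops(). In both cases the urgent-bkops tracking state
 * is reset.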
5727 */
5728 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5729 {
5730 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5731 hba->auto_bkops_enabled = false;
5732 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5733 ufshcd_enable_auto_bkops(hba);
5734 } else {
5735 hba->auto_bkops_enabled = true;
5736 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5737 ufshcd_disable_auto_bkops(hba);
5738 }
5739 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5740 hba->is_urgent_bkops_lvl_checked = false;
5741 }
5742
5743 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5744 {
5745 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5746 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5747 }
5748
5749 /**
5750 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5751 * @hba: per-adapter instance
5752 * @status: bkops_status value
5753 *
5754 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5755 * flag in the device to permit background operations if the device
5756 * bkops_status is greater than or equal to the "status" argument passed to
5757 * this function; disable it otherwise.
5758 *
5759 * Return: 0 for success, non-zero in case of failure.
5760 *
5761 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5762 * to know whether auto bkops is enabled or disabled after this function
5763 * returns control to it.
5764 */
5765 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5766 enum bkops_status status)
5767 {
5768 int err;
5769 u32 curr_status = 0;
5770
5771 err = ufshcd_get_bkops_status(hba, &curr_status);
5772 if (err) {
5773 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5774 __func__, err);
5775 goto out;
5776 } else if (curr_status > BKOPS_STATUS_MAX) {
5777 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5778 __func__, curr_status);
5779 err = -EINVAL;
5780 goto out;
5781 }
5782
5783 if (curr_status >= status)
5784 err = ufshcd_enable_auto_bkops(hba);
5785 else
5786 err = ufshcd_disable_auto_bkops(hba);
5787 out:
5788 return err;
5789 }
5790
5791 /**
5792 * ufshcd_urgent_bkops - handle urgent bkops exception event
5793 * @hba: per-adapter instance
5794 *
5795 * Enable fBackgroundOpsEn flag in the device to permit background
5796 * operations.
5797 *
5798 * Auto-bkops is enabled or disabled by ufshcd_bkops_ctrl() depending on
5799 * whether the current BKOPS status has reached hba->urgent_bkops_lvl.
5800 *
5801 * Return: 0 upon success; < 0 upon failure.
5802 */
5803 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5804 {
5805 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5806 }
5807
5808 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5809 {
5810 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5811 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5812 }
5813
5814 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5815 {
5816 int err;
5817 u32 curr_status = 0;
5818
5819 if (hba->is_urgent_bkops_lvl_checked)
5820 goto enable_auto_bkops;
5821
5822 err = ufshcd_get_bkops_status(hba, &curr_status);
5823 if (err) {
5824 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5825 __func__, err);
5826 goto out;
5827 }
5828
5829 /*
5830 * We are seeing that some devices are raising the urgent bkops
5831 * exception events even when BKOPS status doesn't indicate performance
5832 * impacted or critical. Handle these devices by determining their urgent
5833 * bkops status at runtime.
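 * In this case the reported status is cached as the new
 * hba->urgent_bkops_lvl, so ufshcd_urgent_bkops() will use this lower
 * threshold from now on.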
5834 */ 5835 if (curr_status < BKOPS_STATUS_PERF_IMPACT) { 5836 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", 5837 __func__, curr_status); 5838 /* update the current status as the urgent bkops level */ 5839 hba->urgent_bkops_lvl = curr_status; 5840 hba->is_urgent_bkops_lvl_checked = true; 5841 } 5842 5843 enable_auto_bkops: 5844 err = ufshcd_enable_auto_bkops(hba); 5845 out: 5846 if (err < 0) 5847 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", 5848 __func__, err); 5849 } 5850 5851 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status) 5852 { 5853 u32 value; 5854 5855 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5856 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value)) 5857 return; 5858 5859 dev_info(hba->dev, "exception Tcase %d\n", value - 80); 5860 5861 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP); 5862 5863 /* 5864 * A placeholder for the platform vendors to add whatever additional 5865 * steps required 5866 */ 5867 } 5868 5869 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) 5870 { 5871 u8 index; 5872 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG : 5873 UPIU_QUERY_OPCODE_CLEAR_FLAG; 5874 5875 index = ufshcd_wb_get_query_index(hba); 5876 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL); 5877 } 5878 5879 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable) 5880 { 5881 int ret; 5882 5883 if (!ufshcd_is_wb_allowed(hba) || 5884 hba->dev_info.wb_enabled == enable) 5885 return 0; 5886 5887 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN); 5888 if (ret) { 5889 dev_err(hba->dev, "%s: Write Booster %s failed %d\n", 5890 __func__, enable ? "enabling" : "disabling", ret); 5891 return ret; 5892 } 5893 5894 hba->dev_info.wb_enabled = enable; 5895 dev_dbg(hba->dev, "%s: Write Booster %s\n", 5896 __func__, enable ? "enabled" : "disabled"); 5897 5898 return ret; 5899 } 5900 5901 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba, 5902 bool enable) 5903 { 5904 int ret; 5905 5906 ret = __ufshcd_wb_toggle(hba, enable, 5907 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8); 5908 if (ret) { 5909 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n", 5910 __func__, enable ? "enabling" : "disabling", ret); 5911 return; 5912 } 5913 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n", 5914 __func__, enable ? "enabled" : "disabled"); 5915 } 5916 5917 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable) 5918 { 5919 int ret; 5920 5921 if (!ufshcd_is_wb_allowed(hba) || 5922 hba->dev_info.wb_buf_flush_enabled == enable) 5923 return 0; 5924 5925 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN); 5926 if (ret) { 5927 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n", 5928 __func__, enable ? "enabling" : "disabling", ret); 5929 return ret; 5930 } 5931 5932 hba->dev_info.wb_buf_flush_enabled = enable; 5933 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n", 5934 __func__, enable ? 
"enabled" : "disabled"); 5935 5936 return ret; 5937 } 5938 5939 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, 5940 u32 avail_buf) 5941 { 5942 u32 cur_buf; 5943 int ret; 5944 u8 index; 5945 5946 index = ufshcd_wb_get_query_index(hba); 5947 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5948 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE, 5949 index, 0, &cur_buf); 5950 if (ret) { 5951 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n", 5952 __func__, ret); 5953 return false; 5954 } 5955 5956 if (!cur_buf) { 5957 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", 5958 cur_buf); 5959 return false; 5960 } 5961 /* Let it continue to flush when available buffer exceeds threshold */ 5962 return avail_buf < hba->vps->wb_flush_threshold; 5963 } 5964 5965 static void ufshcd_wb_force_disable(struct ufs_hba *hba) 5966 { 5967 if (ufshcd_is_wb_buf_flush_allowed(hba)) 5968 ufshcd_wb_toggle_buf_flush(hba, false); 5969 5970 ufshcd_wb_toggle_buf_flush_during_h8(hba, false); 5971 ufshcd_wb_toggle(hba, false); 5972 hba->caps &= ~UFSHCD_CAP_WB_EN; 5973 5974 dev_info(hba->dev, "%s: WB force disabled\n", __func__); 5975 } 5976 5977 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba) 5978 { 5979 u32 lifetime; 5980 int ret; 5981 u8 index; 5982 5983 index = ufshcd_wb_get_query_index(hba); 5984 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5985 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST, 5986 index, 0, &lifetime); 5987 if (ret) { 5988 dev_err(hba->dev, 5989 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n", 5990 __func__, ret); 5991 return false; 5992 } 5993 5994 if (lifetime == UFS_WB_EXCEED_LIFETIME) { 5995 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n", 5996 __func__, lifetime); 5997 return false; 5998 } 5999 6000 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n", 6001 __func__, lifetime); 6002 6003 return true; 6004 } 6005 6006 static bool ufshcd_wb_need_flush(struct ufs_hba *hba) 6007 { 6008 int ret; 6009 u32 avail_buf; 6010 u8 index; 6011 6012 if (!ufshcd_is_wb_allowed(hba)) 6013 return false; 6014 6015 if (!ufshcd_is_wb_buf_lifetime_available(hba)) { 6016 ufshcd_wb_force_disable(hba); 6017 return false; 6018 } 6019 6020 /* 6021 * The ufs device needs the vcc to be ON to flush. 6022 * With user-space reduction enabled, it's enough to enable flush 6023 * by checking only the available buffer. The threshold 6024 * defined here is > 90% full. 6025 * With user-space preserved enabled, the current-buffer 6026 * should be checked too because the wb buffer size can reduce 6027 * when disk tends to be full. This info is provided by current 6028 * buffer (dCurrentWriteBoosterBufferSize). There's no point in 6029 * keeping vcc on when current buffer is empty. 
6030 */ 6031 index = ufshcd_wb_get_query_index(hba); 6032 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 6033 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE, 6034 index, 0, &avail_buf); 6035 if (ret) { 6036 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n", 6037 __func__, ret); 6038 return false; 6039 } 6040 6041 if (!hba->dev_info.b_presrv_uspc_en) 6042 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10); 6043 6044 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); 6045 } 6046 6047 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work) 6048 { 6049 struct ufs_hba *hba = container_of(to_delayed_work(work), 6050 struct ufs_hba, 6051 rpm_dev_flush_recheck_work); 6052 /* 6053 * To prevent unnecessary VCC power drain after device finishes 6054 * WriteBooster buffer flush or Auto BKOPs, force runtime resume 6055 * after a certain delay to recheck the threshold by next runtime 6056 * suspend. 6057 */ 6058 ufshcd_rpm_get_sync(hba); 6059 ufshcd_rpm_put_sync(hba); 6060 } 6061 6062 /** 6063 * ufshcd_exception_event_handler - handle exceptions raised by device 6064 * @work: pointer to work data 6065 * 6066 * Read bExceptionEventStatus attribute from the device and handle the 6067 * exception event accordingly. 6068 */ 6069 static void ufshcd_exception_event_handler(struct work_struct *work) 6070 { 6071 struct ufs_hba *hba; 6072 int err; 6073 u32 status = 0; 6074 hba = container_of(work, struct ufs_hba, eeh_work); 6075 6076 ufshcd_scsi_block_requests(hba); 6077 err = ufshcd_get_ee_status(hba, &status); 6078 if (err) { 6079 dev_err(hba->dev, "%s: failed to get exception status %d\n", 6080 __func__, err); 6081 goto out; 6082 } 6083 6084 trace_ufshcd_exception_event(dev_name(hba->dev), status); 6085 6086 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS) 6087 ufshcd_bkops_exception_event_handler(hba); 6088 6089 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP) 6090 ufshcd_temp_exception_event_handler(hba, status); 6091 6092 ufs_debugfs_exception_event(hba, status); 6093 out: 6094 ufshcd_scsi_unblock_requests(hba); 6095 } 6096 6097 /* Complete requests that have door-bell cleared */ 6098 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl) 6099 { 6100 if (is_mcq_enabled(hba)) 6101 ufshcd_mcq_compl_pending_transfer(hba, force_compl); 6102 else 6103 ufshcd_transfer_req_compl(hba); 6104 6105 ufshcd_tmc_handler(hba); 6106 } 6107 6108 /** 6109 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is 6110 * to recover from the DL NAC errors or not. 6111 * @hba: per-adapter instance 6112 * 6113 * Return: true if error handling is required, false otherwise. 6114 */ 6115 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) 6116 { 6117 unsigned long flags; 6118 bool err_handling = true; 6119 6120 spin_lock_irqsave(hba->host->host_lock, flags); 6121 /* 6122 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the 6123 * device fatal error and/or DL NAC & REPLAY timeout errors. 6124 */ 6125 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) 6126 goto out; 6127 6128 if ((hba->saved_err & DEVICE_FATAL_ERROR) || 6129 ((hba->saved_err & UIC_ERROR) && 6130 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) 6131 goto out; 6132 6133 if ((hba->saved_err & UIC_ERROR) && 6134 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { 6135 int err; 6136 /* 6137 * wait for 50ms to see if we can get any other errors or not. 
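 * The host lock is dropped across the sleep (and around
 * ufshcd_verify_dev_init() below), so saved_err/saved_uic_err may be
 * updated by the interrupt path in the meantime and are re-checked
 * afterwards.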
6138 */ 6139 spin_unlock_irqrestore(hba->host->host_lock, flags); 6140 msleep(50); 6141 spin_lock_irqsave(hba->host->host_lock, flags); 6142 6143 /* 6144 * now check if we have got any other severe errors other than 6145 * DL NAC error? 6146 */ 6147 if ((hba->saved_err & INT_FATAL_ERRORS) || 6148 ((hba->saved_err & UIC_ERROR) && 6149 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) 6150 goto out; 6151 6152 /* 6153 * As DL NAC is the only error received so far, send out NOP 6154 * command to confirm if link is still active or not. 6155 * - If we don't get any response then do error recovery. 6156 * - If we get response then clear the DL NAC error bit. 6157 */ 6158 6159 spin_unlock_irqrestore(hba->host->host_lock, flags); 6160 err = ufshcd_verify_dev_init(hba); 6161 spin_lock_irqsave(hba->host->host_lock, flags); 6162 6163 if (err) 6164 goto out; 6165 6166 /* Link seems to be alive hence ignore the DL NAC errors */ 6167 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) 6168 hba->saved_err &= ~UIC_ERROR; 6169 /* clear NAC error */ 6170 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; 6171 if (!hba->saved_uic_err) 6172 err_handling = false; 6173 } 6174 out: 6175 spin_unlock_irqrestore(hba->host->host_lock, flags); 6176 return err_handling; 6177 } 6178 6179 /* host lock must be held before calling this func */ 6180 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) 6181 { 6182 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || 6183 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); 6184 } 6185 6186 void ufshcd_schedule_eh_work(struct ufs_hba *hba) 6187 { 6188 lockdep_assert_held(hba->host->host_lock); 6189 6190 /* handle fatal errors only when link is not in error state */ 6191 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { 6192 if (hba->force_reset || ufshcd_is_link_broken(hba) || 6193 ufshcd_is_saved_err_fatal(hba)) 6194 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; 6195 else 6196 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; 6197 queue_work(hba->eh_wq, &hba->eh_work); 6198 } 6199 } 6200 6201 static void ufshcd_force_error_recovery(struct ufs_hba *hba) 6202 { 6203 spin_lock_irq(hba->host->host_lock); 6204 hba->force_reset = true; 6205 ufshcd_schedule_eh_work(hba); 6206 spin_unlock_irq(hba->host->host_lock); 6207 } 6208 6209 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) 6210 { 6211 mutex_lock(&hba->wb_mutex); 6212 down_write(&hba->clk_scaling_lock); 6213 hba->clk_scaling.is_allowed = allow; 6214 up_write(&hba->clk_scaling_lock); 6215 mutex_unlock(&hba->wb_mutex); 6216 } 6217 6218 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) 6219 { 6220 if (suspend) { 6221 if (hba->clk_scaling.is_enabled) 6222 ufshcd_suspend_clkscaling(hba); 6223 ufshcd_clk_scaling_allow(hba, false); 6224 } else { 6225 ufshcd_clk_scaling_allow(hba, true); 6226 if (hba->clk_scaling.is_enabled) 6227 ufshcd_resume_clkscaling(hba); 6228 } 6229 } 6230 6231 static void ufshcd_err_handling_prepare(struct ufs_hba *hba) 6232 { 6233 ufshcd_rpm_get_sync(hba); 6234 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) || 6235 hba->is_sys_suspended) { 6236 enum ufs_pm_op pm_op; 6237 6238 /* 6239 * Don't assume anything of resume, if 6240 * resume fails, irq and clocks can be OFF, and powers 6241 * can be OFF or in LPM. 
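 * Therefore the resume path below turns the regulators, IRQ and clocks
 * back on and invokes the vendor resume hook before the error handler
 * touches the controller.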
6242 */ 6243 ufshcd_setup_hba_vreg(hba, true); 6244 ufshcd_enable_irq(hba); 6245 ufshcd_setup_vreg(hba, true); 6246 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); 6247 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); 6248 ufshcd_hold(hba); 6249 if (!ufshcd_is_clkgating_allowed(hba)) 6250 ufshcd_setup_clocks(hba, true); 6251 ufshcd_release(hba); 6252 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; 6253 ufshcd_vops_resume(hba, pm_op); 6254 } else { 6255 ufshcd_hold(hba); 6256 if (ufshcd_is_clkscaling_supported(hba) && 6257 hba->clk_scaling.is_enabled) 6258 ufshcd_suspend_clkscaling(hba); 6259 ufshcd_clk_scaling_allow(hba, false); 6260 } 6261 ufshcd_scsi_block_requests(hba); 6262 /* Wait for ongoing ufshcd_queuecommand() calls to finish. */ 6263 blk_mq_wait_quiesce_done(&hba->host->tag_set); 6264 cancel_work_sync(&hba->eeh_work); 6265 } 6266 6267 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) 6268 { 6269 ufshcd_scsi_unblock_requests(hba); 6270 ufshcd_release(hba); 6271 if (ufshcd_is_clkscaling_supported(hba)) 6272 ufshcd_clk_scaling_suspend(hba, false); 6273 ufshcd_rpm_put(hba); 6274 } 6275 6276 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) 6277 { 6278 return (!hba->is_powered || hba->shutting_down || 6279 !hba->ufs_device_wlun || 6280 hba->ufshcd_state == UFSHCD_STATE_ERROR || 6281 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || 6282 ufshcd_is_link_broken(hba)))); 6283 } 6284 6285 #ifdef CONFIG_PM 6286 static void ufshcd_recover_pm_error(struct ufs_hba *hba) 6287 { 6288 struct Scsi_Host *shost = hba->host; 6289 struct scsi_device *sdev; 6290 struct request_queue *q; 6291 int ret; 6292 6293 hba->is_sys_suspended = false; 6294 /* 6295 * Set RPM status of wlun device to RPM_ACTIVE, 6296 * this also clears its runtime error. 6297 */ 6298 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); 6299 6300 /* hba device might have a runtime error otherwise */ 6301 if (ret) 6302 ret = pm_runtime_set_active(hba->dev); 6303 /* 6304 * If wlun device had runtime error, we also need to resume those 6305 * consumer scsi devices in case any of them has failed to be 6306 * resumed due to supplier runtime resume failure. This is to unblock 6307 * blk_queue_enter in case there are bios waiting inside it. 6308 */ 6309 if (!ret) { 6310 shost_for_each_device(sdev, shost) { 6311 q = sdev->request_queue; 6312 if (q->dev && (q->rpm_status == RPM_SUSPENDED || 6313 q->rpm_status == RPM_SUSPENDING)) 6314 pm_request_resume(q->dev); 6315 } 6316 } 6317 } 6318 #else 6319 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba) 6320 { 6321 } 6322 #endif 6323 6324 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) 6325 { 6326 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; 6327 u32 mode; 6328 6329 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); 6330 6331 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK)) 6332 return true; 6333 6334 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK)) 6335 return true; 6336 6337 return false; 6338 } 6339 6340 static bool ufshcd_abort_one(struct request *rq, void *priv) 6341 { 6342 int *ret = priv; 6343 u32 tag = rq->tag; 6344 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 6345 struct scsi_device *sdev = cmd->device; 6346 struct Scsi_Host *shost = sdev->host; 6347 struct ufs_hba *hba = shost_priv(shost); 6348 6349 *ret = ufshcd_try_to_abort_task(hba, tag); 6350 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, 6351 hba->lrb[tag].cmd ? 
hba->lrb[tag].cmd->cmnd[0] : -1, 6352 *ret ? "failed" : "succeeded"); 6353 return *ret == 0; 6354 } 6355 6356 /** 6357 * ufshcd_abort_all - Abort all pending commands. 6358 * @hba: Host bus adapter pointer. 6359 * 6360 * Return: true if and only if the host controller needs to be reset. 6361 */ 6362 static bool ufshcd_abort_all(struct ufs_hba *hba) 6363 { 6364 int tag, ret = 0; 6365 6366 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret); 6367 if (ret) 6368 goto out; 6369 6370 /* Clear pending task management requests */ 6371 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { 6372 ret = ufshcd_clear_tm_cmd(hba, tag); 6373 if (ret) 6374 goto out; 6375 } 6376 6377 out: 6378 /* Complete the requests that are cleared by s/w */ 6379 ufshcd_complete_requests(hba, false); 6380 6381 return ret != 0; 6382 } 6383 6384 /** 6385 * ufshcd_err_handler - handle UFS errors that require s/w attention 6386 * @work: pointer to work structure 6387 */ 6388 static void ufshcd_err_handler(struct work_struct *work) 6389 { 6390 int retries = MAX_ERR_HANDLER_RETRIES; 6391 struct ufs_hba *hba; 6392 unsigned long flags; 6393 bool needs_restore; 6394 bool needs_reset; 6395 int pmc_err; 6396 6397 hba = container_of(work, struct ufs_hba, eh_work); 6398 6399 dev_info(hba->dev, 6400 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n", 6401 __func__, ufshcd_state_name[hba->ufshcd_state], 6402 hba->is_powered, hba->shutting_down, hba->saved_err, 6403 hba->saved_uic_err, hba->force_reset, 6404 ufshcd_is_link_broken(hba) ? "; link is broken" : ""); 6405 6406 down(&hba->host_sem); 6407 spin_lock_irqsave(hba->host->host_lock, flags); 6408 if (ufshcd_err_handling_should_stop(hba)) { 6409 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6410 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 6411 spin_unlock_irqrestore(hba->host->host_lock, flags); 6412 up(&hba->host_sem); 6413 return; 6414 } 6415 ufshcd_set_eh_in_progress(hba); 6416 spin_unlock_irqrestore(hba->host->host_lock, flags); 6417 ufshcd_err_handling_prepare(hba); 6418 /* Complete requests that have door-bell cleared by h/w */ 6419 ufshcd_complete_requests(hba, false); 6420 spin_lock_irqsave(hba->host->host_lock, flags); 6421 again: 6422 needs_restore = false; 6423 needs_reset = false; 6424 6425 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6426 hba->ufshcd_state = UFSHCD_STATE_RESET; 6427 /* 6428 * A full reset and restore might have happened after preparation 6429 * is finished, double check whether we should stop. 
6430 */
6431 if (ufshcd_err_handling_should_stop(hba))
6432 goto skip_err_handling;
6433
6434 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6435 bool ret;
6436
6437 spin_unlock_irqrestore(hba->host->host_lock, flags);
6438 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6439 ret = ufshcd_quirk_dl_nac_errors(hba);
6440 spin_lock_irqsave(hba->host->host_lock, flags);
6441 if (!ret && ufshcd_err_handling_should_stop(hba))
6442 goto skip_err_handling;
6443 }
6444
6445 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6446 (hba->saved_uic_err &&
6447 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6448 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6449
6450 spin_unlock_irqrestore(hba->host->host_lock, flags);
6451 ufshcd_print_host_state(hba);
6452 ufshcd_print_pwr_info(hba);
6453 ufshcd_print_evt_hist(hba);
6454 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6455 ufshcd_print_trs_all(hba, pr_prdt);
6456 spin_lock_irqsave(hba->host->host_lock, flags);
6457 }
6458
6459 /*
6460 * If a host reset is required, skip forcefully clearing the pending
6461 * transfers because they will be cleared anyway during the host reset
6462 * and restore.
6463 */
6464 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6465 ufshcd_is_saved_err_fatal(hba) ||
6466 ((hba->saved_err & UIC_ERROR) &&
6467 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6468 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6469 needs_reset = true;
6470 goto do_reset;
6471 }
6472
6473 /*
6474 * If LINERESET was caught, UFS might have been put to PWM mode,
6475 * check if power mode restore is needed.
6476 */
6477 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6478 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6479 if (!hba->saved_uic_err)
6480 hba->saved_err &= ~UIC_ERROR;
6481 spin_unlock_irqrestore(hba->host->host_lock, flags);
6482 if (ufshcd_is_pwr_mode_restore_needed(hba))
6483 needs_restore = true;
6484 spin_lock_irqsave(hba->host->host_lock, flags);
6485 if (!hba->saved_err && !needs_restore)
6486 goto skip_err_handling;
6487 }
6488
6489 hba->silence_err_logs = true;
6490 /* release lock as clear command might sleep */
6491 spin_unlock_irqrestore(hba->host->host_lock, flags);
6492
6493 needs_reset = ufshcd_abort_all(hba);
6494
6495 spin_lock_irqsave(hba->host->host_lock, flags);
6496 hba->silence_err_logs = false;
6497 if (needs_reset)
6498 goto do_reset;
6499
6500 /*
6501 * After all reqs and tasks are cleared from doorbell,
6502 * now it is safe to restore power mode.
6503 */
6504 if (needs_restore) {
6505 spin_unlock_irqrestore(hba->host->host_lock, flags);
6506 /*
6507 * Hold the scaling lock just in case dev cmds
6508 * are sent via bsg and/or sysfs.
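 * Taking clk_scaling_lock for write excludes any device management
 * command in flight, since those paths take the same lock for read, so
 * the forced power mode change below cannot race with them.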
6509 */ 6510 down_write(&hba->clk_scaling_lock); 6511 hba->force_pmc = true; 6512 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); 6513 if (pmc_err) { 6514 needs_reset = true; 6515 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", 6516 __func__, pmc_err); 6517 } 6518 hba->force_pmc = false; 6519 ufshcd_print_pwr_info(hba); 6520 up_write(&hba->clk_scaling_lock); 6521 spin_lock_irqsave(hba->host->host_lock, flags); 6522 } 6523 6524 do_reset: 6525 /* Fatal errors need reset */ 6526 if (needs_reset) { 6527 int err; 6528 6529 hba->force_reset = false; 6530 spin_unlock_irqrestore(hba->host->host_lock, flags); 6531 err = ufshcd_reset_and_restore(hba); 6532 if (err) 6533 dev_err(hba->dev, "%s: reset and restore failed with err %d\n", 6534 __func__, err); 6535 else 6536 ufshcd_recover_pm_error(hba); 6537 spin_lock_irqsave(hba->host->host_lock, flags); 6538 } 6539 6540 skip_err_handling: 6541 if (!needs_reset) { 6542 if (hba->ufshcd_state == UFSHCD_STATE_RESET) 6543 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 6544 if (hba->saved_err || hba->saved_uic_err) 6545 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", 6546 __func__, hba->saved_err, hba->saved_uic_err); 6547 } 6548 /* Exit in an operational state or dead */ 6549 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && 6550 hba->ufshcd_state != UFSHCD_STATE_ERROR) { 6551 if (--retries) 6552 goto again; 6553 hba->ufshcd_state = UFSHCD_STATE_ERROR; 6554 } 6555 ufshcd_clear_eh_in_progress(hba); 6556 spin_unlock_irqrestore(hba->host->host_lock, flags); 6557 ufshcd_err_handling_unprepare(hba); 6558 up(&hba->host_sem); 6559 6560 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__, 6561 ufshcd_state_name[hba->ufshcd_state]); 6562 } 6563 6564 /** 6565 * ufshcd_update_uic_error - check and set fatal UIC error flags. 6566 * @hba: per-adapter instance 6567 * 6568 * Return: 6569 * IRQ_HANDLED - If interrupt is valid 6570 * IRQ_NONE - If invalid interrupt 6571 */ 6572 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) 6573 { 6574 u32 reg; 6575 irqreturn_t retval = IRQ_NONE; 6576 6577 /* PHY layer error */ 6578 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); 6579 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && 6580 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) { 6581 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg); 6582 /* 6583 * To know whether this error is fatal or not, DB timeout 6584 * must be checked but this error is handled separately. 6585 */ 6586 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK) 6587 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", 6588 __func__); 6589 6590 /* Got a LINERESET indication. */ 6591 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) { 6592 struct uic_command *cmd = NULL; 6593 6594 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR; 6595 if (hba->uic_async_done && hba->active_uic_cmd) 6596 cmd = hba->active_uic_cmd; 6597 /* 6598 * Ignore the LINERESET during power mode change 6599 * operation via DME_SET command. 
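 * When the active UIC command is DME_SET (the opcode used for the power
 * mode change), the PA generic error flag set above is cleared again, as
 * the LINERESET is presumed to be part of that operation rather than a
 * real fault.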
6600 */ 6601 if (cmd && (cmd->command == UIC_CMD_DME_SET)) 6602 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR; 6603 } 6604 retval |= IRQ_HANDLED; 6605 } 6606 6607 /* PA_INIT_ERROR is fatal and needs UIC reset */ 6608 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); 6609 if ((reg & UIC_DATA_LINK_LAYER_ERROR) && 6610 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) { 6611 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg); 6612 6613 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) 6614 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; 6615 else if (hba->dev_quirks & 6616 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { 6617 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) 6618 hba->uic_error |= 6619 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; 6620 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) 6621 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; 6622 } 6623 retval |= IRQ_HANDLED; 6624 } 6625 6626 /* UIC NL/TL/DME errors needs software retry */ 6627 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); 6628 if ((reg & UIC_NETWORK_LAYER_ERROR) && 6629 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) { 6630 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg); 6631 hba->uic_error |= UFSHCD_UIC_NL_ERROR; 6632 retval |= IRQ_HANDLED; 6633 } 6634 6635 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); 6636 if ((reg & UIC_TRANSPORT_LAYER_ERROR) && 6637 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) { 6638 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg); 6639 hba->uic_error |= UFSHCD_UIC_TL_ERROR; 6640 retval |= IRQ_HANDLED; 6641 } 6642 6643 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); 6644 if ((reg & UIC_DME_ERROR) && 6645 (reg & UIC_DME_ERROR_CODE_MASK)) { 6646 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg); 6647 hba->uic_error |= UFSHCD_UIC_DME_ERROR; 6648 retval |= IRQ_HANDLED; 6649 } 6650 6651 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", 6652 __func__, hba->uic_error); 6653 return retval; 6654 } 6655 6656 /** 6657 * ufshcd_check_errors - Check for errors that need s/w attention 6658 * @hba: per-adapter instance 6659 * @intr_status: interrupt status generated by the controller 6660 * 6661 * Return: 6662 * IRQ_HANDLED - If interrupt is valid 6663 * IRQ_NONE - If invalid interrupt 6664 */ 6665 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) 6666 { 6667 bool queue_eh_work = false; 6668 irqreturn_t retval = IRQ_NONE; 6669 6670 spin_lock(hba->host->host_lock); 6671 hba->errors |= UFSHCD_ERROR_MASK & intr_status; 6672 6673 if (hba->errors & INT_FATAL_ERRORS) { 6674 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, 6675 hba->errors); 6676 queue_eh_work = true; 6677 } 6678 6679 if (hba->errors & UIC_ERROR) { 6680 hba->uic_error = 0; 6681 retval = ufshcd_update_uic_error(hba); 6682 if (hba->uic_error) 6683 queue_eh_work = true; 6684 } 6685 6686 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) { 6687 dev_err(hba->dev, 6688 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n", 6689 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? 6690 "Enter" : "Exit", 6691 hba->errors, ufshcd_get_upmcrs(hba)); 6692 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR, 6693 hba->errors); 6694 ufshcd_set_link_broken(hba); 6695 queue_eh_work = true; 6696 } 6697 6698 if (queue_eh_work) { 6699 /* 6700 * update the transfer error masks to sticky bits, let's do this 6701 * irrespective of current ufshcd_state. 
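 * The saved_err/saved_uic_err masks accumulate across interrupts and are
 * consumed (and cleared) later by ufshcd_err_handler(), so no error bits
 * are lost if the handler has not run yet.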
6702 */ 6703 hba->saved_err |= hba->errors; 6704 hba->saved_uic_err |= hba->uic_error; 6705 6706 /* dump controller state before resetting */ 6707 if ((hba->saved_err & 6708 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || 6709 (hba->saved_uic_err && 6710 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { 6711 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", 6712 __func__, hba->saved_err, 6713 hba->saved_uic_err); 6714 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, 6715 "host_regs: "); 6716 ufshcd_print_pwr_info(hba); 6717 } 6718 ufshcd_schedule_eh_work(hba); 6719 retval |= IRQ_HANDLED; 6720 } 6721 /* 6722 * if (!queue_eh_work) - 6723 * Other errors are either non-fatal where host recovers 6724 * itself without s/w intervention or errors that will be 6725 * handled by the SCSI core layer. 6726 */ 6727 hba->errors = 0; 6728 hba->uic_error = 0; 6729 spin_unlock(hba->host->host_lock); 6730 return retval; 6731 } 6732 6733 /** 6734 * ufshcd_tmc_handler - handle task management function completion 6735 * @hba: per adapter instance 6736 * 6737 * Return: 6738 * IRQ_HANDLED - If interrupt is valid 6739 * IRQ_NONE - If invalid interrupt 6740 */ 6741 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) 6742 { 6743 unsigned long flags, pending, issued; 6744 irqreturn_t ret = IRQ_NONE; 6745 int tag; 6746 6747 spin_lock_irqsave(hba->host->host_lock, flags); 6748 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); 6749 issued = hba->outstanding_tasks & ~pending; 6750 for_each_set_bit(tag, &issued, hba->nutmrs) { 6751 struct request *req = hba->tmf_rqs[tag]; 6752 struct completion *c = req->end_io_data; 6753 6754 complete(c); 6755 ret = IRQ_HANDLED; 6756 } 6757 spin_unlock_irqrestore(hba->host->host_lock, flags); 6758 6759 return ret; 6760 } 6761 6762 /** 6763 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events 6764 * @hba: per adapter instance 6765 * 6766 * Return: IRQ_HANDLED if interrupt is handled. 
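 *
 * Note: if the vendor op for reading the outstanding CQs fails, every
 * non-poll hardware queue is scanned instead, which is safe but less
 * precise.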
6767 */ 6768 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba) 6769 { 6770 struct ufs_hw_queue *hwq; 6771 unsigned long outstanding_cqs; 6772 unsigned int nr_queues; 6773 int i, ret; 6774 u32 events; 6775 6776 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs); 6777 if (ret) 6778 outstanding_cqs = (1U << hba->nr_hw_queues) - 1; 6779 6780 /* Exclude the poll queues */ 6781 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; 6782 for_each_set_bit(i, &outstanding_cqs, nr_queues) { 6783 hwq = &hba->uhq[i]; 6784 6785 events = ufshcd_mcq_read_cqis(hba, i); 6786 if (events) 6787 ufshcd_mcq_write_cqis(hba, events, i); 6788 6789 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS) 6790 ufshcd_mcq_poll_cqe_lock(hba, hwq); 6791 } 6792 6793 return IRQ_HANDLED; 6794 } 6795 6796 /** 6797 * ufshcd_sl_intr - Interrupt service routine 6798 * @hba: per adapter instance 6799 * @intr_status: contains interrupts generated by the controller 6800 * 6801 * Return: 6802 * IRQ_HANDLED - If interrupt is valid 6803 * IRQ_NONE - If invalid interrupt 6804 */ 6805 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) 6806 { 6807 irqreturn_t retval = IRQ_NONE; 6808 6809 if (intr_status & UFSHCD_UIC_MASK) 6810 retval |= ufshcd_uic_cmd_compl(hba, intr_status); 6811 6812 if (intr_status & UFSHCD_ERROR_MASK || hba->errors) 6813 retval |= ufshcd_check_errors(hba, intr_status); 6814 6815 if (intr_status & UTP_TASK_REQ_COMPL) 6816 retval |= ufshcd_tmc_handler(hba); 6817 6818 if (intr_status & UTP_TRANSFER_REQ_COMPL) 6819 retval |= ufshcd_transfer_req_compl(hba); 6820 6821 if (intr_status & MCQ_CQ_EVENT_STATUS) 6822 retval |= ufshcd_handle_mcq_cq_events(hba); 6823 6824 return retval; 6825 } 6826 6827 /** 6828 * ufshcd_intr - Main interrupt service routine 6829 * @irq: irq number 6830 * @__hba: pointer to adapter instance 6831 * 6832 * Return: 6833 * IRQ_HANDLED - If interrupt is valid 6834 * IRQ_NONE - If invalid interrupt 6835 */ 6836 static irqreturn_t ufshcd_intr(int irq, void *__hba) 6837 { 6838 u32 intr_status, enabled_intr_status = 0; 6839 irqreturn_t retval = IRQ_NONE; 6840 struct ufs_hba *hba = __hba; 6841 int retries = hba->nutrs; 6842 6843 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 6844 hba->ufs_stats.last_intr_status = intr_status; 6845 hba->ufs_stats.last_intr_ts = local_clock(); 6846 6847 /* 6848 * There could be max of hba->nutrs reqs in flight and in worst case 6849 * if the reqs get finished 1 by 1 after the interrupt status is 6850 * read, make sure we handle them by checking the interrupt status 6851 * again in a loop until we process all of the reqs before returning. 
6852 */
6853 while (intr_status && retries--) {
6854 enabled_intr_status =
6855 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6856 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6857 if (enabled_intr_status)
6858 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6859
6860 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6861 }
6862
6863 if (enabled_intr_status && retval == IRQ_NONE &&
6864 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6865 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
6866 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6867 __func__,
6868 intr_status,
6869 hba->ufs_stats.last_intr_status,
6870 enabled_intr_status);
6871 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6872 }
6873
6874 return retval;
6875 }
6876
6877 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6878 {
6879 int err = 0;
6880 u32 mask = 1 << tag;
6881 unsigned long flags;
6882
6883 if (!test_bit(tag, &hba->outstanding_tasks))
6884 goto out;
6885
6886 spin_lock_irqsave(hba->host->host_lock, flags);
6887 ufshcd_utmrl_clear(hba, tag);
6888 spin_unlock_irqrestore(hba->host->host_lock, flags);
6889
6890 /* poll for max. 1 sec to clear door bell register by h/w */
6891 err = ufshcd_wait_for_register(hba,
6892 REG_UTP_TASK_REQ_DOOR_BELL,
6893 mask, 0, 1000, 1000);
6894
6895 dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
6896 tag, err ? "failed" : "succeeded");
6897
6898 out:
6899 return err;
6900 }
6901
6902 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6903 struct utp_task_req_desc *treq, u8 tm_function)
6904 {
6905 struct request_queue *q = hba->tmf_queue;
6906 struct Scsi_Host *host = hba->host;
6907 DECLARE_COMPLETION_ONSTACK(wait);
6908 struct request *req;
6909 unsigned long flags;
6910 int task_tag, err;
6911
6912 /*
6913 * blk_mq_alloc_request() is used here only to get a free tag.
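 * The request itself carries no data: its tag indexes the UTP task
 * management request list, and the on-stack completion stored in
 * ->end_io_data is completed from ufshcd_tmc_handler() once the
 * controller clears the corresponding doorbell bit.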
6914 */ 6915 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0); 6916 if (IS_ERR(req)) 6917 return PTR_ERR(req); 6918 6919 req->end_io_data = &wait; 6920 ufshcd_hold(hba); 6921 6922 spin_lock_irqsave(host->host_lock, flags); 6923 6924 task_tag = req->tag; 6925 WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n", 6926 task_tag); 6927 hba->tmf_rqs[req->tag] = req; 6928 treq->upiu_req.req_header.task_tag = task_tag; 6929 6930 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); 6931 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); 6932 6933 /* send command to the controller */ 6934 __set_bit(task_tag, &hba->outstanding_tasks); 6935 6936 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); 6937 /* Make sure that doorbell is committed immediately */ 6938 wmb(); 6939 6940 spin_unlock_irqrestore(host->host_lock, flags); 6941 6942 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); 6943 6944 /* wait until the task management command is completed */ 6945 err = wait_for_completion_io_timeout(&wait, 6946 msecs_to_jiffies(TM_CMD_TIMEOUT)); 6947 if (!err) { 6948 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); 6949 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", 6950 __func__, tm_function); 6951 if (ufshcd_clear_tm_cmd(hba, task_tag)) 6952 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", 6953 __func__, task_tag); 6954 err = -ETIMEDOUT; 6955 } else { 6956 err = 0; 6957 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); 6958 6959 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); 6960 } 6961 6962 spin_lock_irqsave(hba->host->host_lock, flags); 6963 hba->tmf_rqs[req->tag] = NULL; 6964 __clear_bit(task_tag, &hba->outstanding_tasks); 6965 spin_unlock_irqrestore(hba->host->host_lock, flags); 6966 6967 ufshcd_release(hba); 6968 blk_mq_free_request(req); 6969 6970 return err; 6971 } 6972 6973 /** 6974 * ufshcd_issue_tm_cmd - issues task management commands to controller 6975 * @hba: per adapter instance 6976 * @lun_id: LUN ID to which TM command is sent 6977 * @task_id: task ID to which the TM command is applicable 6978 * @tm_function: task management function opcode 6979 * @tm_response: task management service response return value 6980 * 6981 * Return: non-zero value on error, zero on success. 6982 */ 6983 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, 6984 u8 tm_function, u8 *tm_response) 6985 { 6986 struct utp_task_req_desc treq = { }; 6987 enum utp_ocs ocs_value; 6988 int err; 6989 6990 /* Configure task request descriptor */ 6991 treq.header.interrupt = 1; 6992 treq.header.ocs = OCS_INVALID_COMMAND_STATUS; 6993 6994 /* Configure task request UPIU */ 6995 treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ; 6996 treq.upiu_req.req_header.lun = lun_id; 6997 treq.upiu_req.req_header.tm_function = tm_function; 6998 6999 /* 7000 * The host shall provide the same value for LUN field in the basic 7001 * header and for Input Parameter. 
7002 */ 7003 treq.upiu_req.input_param1 = cpu_to_be32(lun_id); 7004 treq.upiu_req.input_param2 = cpu_to_be32(task_id); 7005 7006 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); 7007 if (err == -ETIMEDOUT) 7008 return err; 7009 7010 ocs_value = treq.header.ocs & MASK_OCS; 7011 if (ocs_value != OCS_SUCCESS) 7012 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", 7013 __func__, ocs_value); 7014 else if (tm_response) 7015 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) & 7016 MASK_TM_SERVICE_RESP; 7017 return err; 7018 } 7019 7020 /** 7021 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests 7022 * @hba: per-adapter instance 7023 * @req_upiu: upiu request 7024 * @rsp_upiu: upiu reply 7025 * @desc_buff: pointer to descriptor buffer, NULL if NA 7026 * @buff_len: descriptor size, 0 if NA 7027 * @cmd_type: specifies the type (NOP, Query...) 7028 * @desc_op: descriptor operation 7029 * 7030 * Those type of requests uses UTP Transfer Request Descriptor - utrd. 7031 * Therefore, it "rides" the device management infrastructure: uses its tag and 7032 * tasks work queues. 7033 * 7034 * Since there is only one available tag for device management commands, 7035 * the caller is expected to hold the hba->dev_cmd.lock mutex. 7036 * 7037 * Return: 0 upon success; < 0 upon failure. 7038 */ 7039 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, 7040 struct utp_upiu_req *req_upiu, 7041 struct utp_upiu_req *rsp_upiu, 7042 u8 *desc_buff, int *buff_len, 7043 enum dev_cmd_type cmd_type, 7044 enum query_opcode desc_op) 7045 { 7046 DECLARE_COMPLETION_ONSTACK(wait); 7047 const u32 tag = hba->reserved_slot; 7048 struct ufshcd_lrb *lrbp; 7049 int err = 0; 7050 u8 upiu_flags; 7051 7052 /* Protects use of hba->reserved_slot. */ 7053 lockdep_assert_held(&hba->dev_cmd.lock); 7054 7055 down_read(&hba->clk_scaling_lock); 7056 7057 lrbp = &hba->lrb[tag]; 7058 lrbp->cmd = NULL; 7059 lrbp->task_tag = tag; 7060 lrbp->lun = 0; 7061 lrbp->intr_cmd = true; 7062 ufshcd_prepare_lrbp_crypto(NULL, lrbp); 7063 hba->dev_cmd.type = cmd_type; 7064 7065 if (hba->ufs_version <= ufshci_version(1, 1)) 7066 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; 7067 else 7068 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 7069 7070 /* update the task tag in the request upiu */ 7071 req_upiu->header.task_tag = tag; 7072 7073 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0); 7074 7075 /* just copy the upiu request as it is */ 7076 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); 7077 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) { 7078 /* The Data Segment Area is optional depending upon the query 7079 * function value. for WRITE DESCRIPTOR, the data segment 7080 * follows right after the tsf. 7081 */ 7082 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len); 7083 *buff_len = 0; 7084 } 7085 7086 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 7087 7088 hba->dev_cmd.complete = &wait; 7089 7090 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); 7091 7092 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); 7093 /* 7094 * ignore the returning value here - ufshcd_check_query_response is 7095 * bound to fail since dev_cmd.query and dev_cmd.type were left empty. 7096 * read the response directly ignoring all errors. 
7097 */ 7098 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT); 7099 7100 /* just copy the upiu response as it is */ 7101 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); 7102 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) { 7103 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu); 7104 u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header 7105 .data_segment_length); 7106 7107 if (*buff_len >= resp_len) { 7108 memcpy(desc_buff, descp, resp_len); 7109 *buff_len = resp_len; 7110 } else { 7111 dev_warn(hba->dev, 7112 "%s: rsp size %d is bigger than buffer size %d", 7113 __func__, resp_len, *buff_len); 7114 *buff_len = 0; 7115 err = -EINVAL; 7116 } 7117 } 7118 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, 7119 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); 7120 7121 up_read(&hba->clk_scaling_lock); 7122 return err; 7123 } 7124 7125 /** 7126 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands 7127 * @hba: per-adapter instance 7128 * @req_upiu: upiu request 7129 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands 7130 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target 7131 * @desc_buff: pointer to descriptor buffer, NULL if NA 7132 * @buff_len: descriptor size, 0 if NA 7133 * @desc_op: descriptor operation 7134 * 7135 * Supports UTP Transfer requests (nop and query), and UTP Task 7136 * Management requests. 7137 * It is up to the caller to fill the upiu conent properly, as it will 7138 * be copied without any further input validations. 7139 * 7140 * Return: 0 upon success; < 0 upon failure. 7141 */ 7142 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, 7143 struct utp_upiu_req *req_upiu, 7144 struct utp_upiu_req *rsp_upiu, 7145 enum upiu_request_transaction msgcode, 7146 u8 *desc_buff, int *buff_len, 7147 enum query_opcode desc_op) 7148 { 7149 int err; 7150 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY; 7151 struct utp_task_req_desc treq = { }; 7152 enum utp_ocs ocs_value; 7153 u8 tm_f = req_upiu->header.tm_function; 7154 7155 switch (msgcode) { 7156 case UPIU_TRANSACTION_NOP_OUT: 7157 cmd_type = DEV_CMD_TYPE_NOP; 7158 fallthrough; 7159 case UPIU_TRANSACTION_QUERY_REQ: 7160 ufshcd_hold(hba); 7161 mutex_lock(&hba->dev_cmd.lock); 7162 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, 7163 desc_buff, buff_len, 7164 cmd_type, desc_op); 7165 mutex_unlock(&hba->dev_cmd.lock); 7166 ufshcd_release(hba); 7167 7168 break; 7169 case UPIU_TRANSACTION_TASK_REQ: 7170 treq.header.interrupt = 1; 7171 treq.header.ocs = OCS_INVALID_COMMAND_STATUS; 7172 7173 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu)); 7174 7175 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f); 7176 if (err == -ETIMEDOUT) 7177 break; 7178 7179 ocs_value = treq.header.ocs & MASK_OCS; 7180 if (ocs_value != OCS_SUCCESS) { 7181 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, 7182 ocs_value); 7183 break; 7184 } 7185 7186 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu)); 7187 7188 break; 7189 default: 7190 err = -EINVAL; 7191 7192 break; 7193 } 7194 7195 return err; 7196 } 7197 7198 /** 7199 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request 7200 * @hba: per adapter instance 7201 * @req_upiu: upiu request 7202 * @rsp_upiu: upiu reply 7203 * @req_ehs: EHS field which contains Advanced RPMB Request Message 7204 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message 7205 * @sg_cnt: The number of sg lists actually used 7206 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB 
operation 7207 * @dir: DMA direction 7208 * 7209 * Return: zero on success, non-zero on failure. 7210 */ 7211 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, 7212 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs, 7213 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list, 7214 enum dma_data_direction dir) 7215 { 7216 DECLARE_COMPLETION_ONSTACK(wait); 7217 const u32 tag = hba->reserved_slot; 7218 struct ufshcd_lrb *lrbp; 7219 int err = 0; 7220 int result; 7221 u8 upiu_flags; 7222 u8 *ehs_data; 7223 u16 ehs_len; 7224 7225 /* Protects use of hba->reserved_slot. */ 7226 ufshcd_hold(hba); 7227 mutex_lock(&hba->dev_cmd.lock); 7228 down_read(&hba->clk_scaling_lock); 7229 7230 lrbp = &hba->lrb[tag]; 7231 lrbp->cmd = NULL; 7232 lrbp->task_tag = tag; 7233 lrbp->lun = UFS_UPIU_RPMB_WLUN; 7234 7235 lrbp->intr_cmd = true; 7236 ufshcd_prepare_lrbp_crypto(NULL, lrbp); 7237 hba->dev_cmd.type = DEV_CMD_TYPE_RPMB; 7238 7239 /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */ 7240 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 7241 7242 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2); 7243 7244 /* update the task tag and LUN in the request upiu */ 7245 req_upiu->header.flags = upiu_flags; 7246 req_upiu->header.lun = UFS_UPIU_RPMB_WLUN; 7247 req_upiu->header.task_tag = tag; 7248 7249 /* copy the UPIU(contains CDB) request as it is */ 7250 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); 7251 /* Copy EHS, starting with byte32, immediately after the CDB package */ 7252 memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs)); 7253 7254 if (dir != DMA_NONE && sg_list) 7255 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list); 7256 7257 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 7258 7259 hba->dev_cmd.complete = &wait; 7260 7261 ufshcd_send_command(hba, tag, hba->dev_cmd_queue); 7262 7263 err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT); 7264 7265 if (!err) { 7266 /* Just copy the upiu response as it is */ 7267 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); 7268 /* Get the response UPIU result */ 7269 result = (lrbp->ucd_rsp_ptr->header.response << 8) | 7270 lrbp->ucd_rsp_ptr->header.status; 7271 7272 ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length; 7273 /* 7274 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data 7275 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB 7276 * Message is 02h 7277 */ 7278 if (ehs_len == 2 && rsp_ehs) { 7279 /* 7280 * ucd_rsp_ptr points to a buffer with a length of 512 bytes 7281 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32 7282 */ 7283 ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE; 7284 memcpy(rsp_ehs, ehs_data, ehs_len * 32); 7285 } 7286 } 7287 7288 up_read(&hba->clk_scaling_lock); 7289 mutex_unlock(&hba->dev_cmd.lock); 7290 ufshcd_release(hba); 7291 return err ? : result; 7292 } 7293 7294 /** 7295 * ufshcd_eh_device_reset_handler() - Reset a single logical unit. 7296 * @cmd: SCSI command pointer 7297 * 7298 * Return: SUCCESS or FAILED. 
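 *
 * Note: the reset is implemented as a UFS_LOGICAL_RESET task management
 * request to the LUN, after which any commands still outstanding for that
 * LUN are cleared and completed (per hardware queue in MCQ mode, via the
 * doorbell in single doorbell mode).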
7299 */ 7300 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) 7301 { 7302 unsigned long flags, pending_reqs = 0, not_cleared = 0; 7303 struct Scsi_Host *host; 7304 struct ufs_hba *hba; 7305 struct ufs_hw_queue *hwq; 7306 struct ufshcd_lrb *lrbp; 7307 u32 pos, not_cleared_mask = 0; 7308 int err; 7309 u8 resp = 0xF, lun; 7310 7311 host = cmd->device->host; 7312 hba = shost_priv(host); 7313 7314 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 7315 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); 7316 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 7317 if (!err) 7318 err = resp; 7319 goto out; 7320 } 7321 7322 if (is_mcq_enabled(hba)) { 7323 for (pos = 0; pos < hba->nutrs; pos++) { 7324 lrbp = &hba->lrb[pos]; 7325 if (ufshcd_cmd_inflight(lrbp->cmd) && 7326 lrbp->lun == lun) { 7327 ufshcd_clear_cmd(hba, pos); 7328 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); 7329 ufshcd_mcq_poll_cqe_lock(hba, hwq); 7330 } 7331 } 7332 err = 0; 7333 goto out; 7334 } 7335 7336 /* clear the commands that were pending for corresponding LUN */ 7337 spin_lock_irqsave(&hba->outstanding_lock, flags); 7338 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) 7339 if (hba->lrb[pos].lun == lun) 7340 __set_bit(pos, &pending_reqs); 7341 hba->outstanding_reqs &= ~pending_reqs; 7342 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 7343 7344 for_each_set_bit(pos, &pending_reqs, hba->nutrs) { 7345 if (ufshcd_clear_cmd(hba, pos) < 0) { 7346 spin_lock_irqsave(&hba->outstanding_lock, flags); 7347 not_cleared = 1U << pos & 7348 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7349 hba->outstanding_reqs |= not_cleared; 7350 not_cleared_mask |= not_cleared; 7351 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 7352 7353 dev_err(hba->dev, "%s: failed to clear request %d\n", 7354 __func__, pos); 7355 } 7356 } 7357 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask); 7358 7359 out: 7360 hba->req_abort_count = 0; 7361 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); 7362 if (!err) { 7363 err = SUCCESS; 7364 } else { 7365 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); 7366 err = FAILED; 7367 } 7368 return err; 7369 } 7370 7371 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) 7372 { 7373 struct ufshcd_lrb *lrbp; 7374 int tag; 7375 7376 for_each_set_bit(tag, &bitmap, hba->nutrs) { 7377 lrbp = &hba->lrb[tag]; 7378 lrbp->req_abort_skip = true; 7379 } 7380 } 7381 7382 /** 7383 * ufshcd_try_to_abort_task - abort a specific task 7384 * @hba: Pointer to adapter instance 7385 * @tag: Task tag/index to be aborted 7386 * 7387 * Abort the pending command in device by sending UFS_ABORT_TASK task management 7388 * command, and in host controller by clearing the door-bell register. There can 7389 * be race between controller sending the command to the device while abort is 7390 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is 7391 * really issued and then try to abort it. 7392 * 7393 * Return: zero on success, non-zero on failure. 
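 *
 * Note: UFS_QUERY_TASK is polled up to 100 times while the command is no
 * longer reported as pending in the device but is still in flight (MCQ)
 * or present in the doorbell (SDB); only when the device reports the
 * command as pending is UFS_ABORT_TASK sent and the transfer slot cleared.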
7394 */ 7395 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) 7396 { 7397 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; 7398 int err = 0; 7399 int poll_cnt; 7400 u8 resp = 0xF; 7401 u32 reg; 7402 7403 for (poll_cnt = 100; poll_cnt; poll_cnt--) { 7404 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, 7405 UFS_QUERY_TASK, &resp); 7406 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { 7407 /* cmd pending in the device */ 7408 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", 7409 __func__, tag); 7410 break; 7411 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 7412 /* 7413 * cmd not pending in the device, check if it is 7414 * in transition. 7415 */ 7416 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", 7417 __func__, tag); 7418 if (is_mcq_enabled(hba)) { 7419 /* MCQ mode */ 7420 if (ufshcd_cmd_inflight(lrbp->cmd)) { 7421 /* sleep for max. 200us same delay as in SDB mode */ 7422 usleep_range(100, 200); 7423 continue; 7424 } 7425 /* command completed already */ 7426 dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n", 7427 __func__, tag); 7428 goto out; 7429 } 7430 7431 /* Single Doorbell Mode */ 7432 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7433 if (reg & (1 << tag)) { 7434 /* sleep for max. 200us to stabilize */ 7435 usleep_range(100, 200); 7436 continue; 7437 } 7438 /* command completed already */ 7439 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", 7440 __func__, tag); 7441 goto out; 7442 } else { 7443 dev_err(hba->dev, 7444 "%s: no response from device. tag = %d, err %d\n", 7445 __func__, tag, err); 7446 if (!err) 7447 err = resp; /* service response error */ 7448 goto out; 7449 } 7450 } 7451 7452 if (!poll_cnt) { 7453 err = -EBUSY; 7454 goto out; 7455 } 7456 7457 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, 7458 UFS_ABORT_TASK, &resp); 7459 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 7460 if (!err) { 7461 err = resp; /* service response error */ 7462 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", 7463 __func__, tag, err); 7464 } 7465 goto out; 7466 } 7467 7468 err = ufshcd_clear_cmd(hba, tag); 7469 if (err) 7470 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", 7471 __func__, tag, err); 7472 7473 out: 7474 return err; 7475 } 7476 7477 /** 7478 * ufshcd_abort - scsi host template eh_abort_handler callback 7479 * @cmd: SCSI command pointer 7480 * 7481 * Return: SUCCESS or FAILED. 7482 */ 7483 static int ufshcd_abort(struct scsi_cmnd *cmd) 7484 { 7485 struct Scsi_Host *host = cmd->device->host; 7486 struct ufs_hba *hba = shost_priv(host); 7487 int tag = scsi_cmd_to_rq(cmd)->tag; 7488 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; 7489 unsigned long flags; 7490 int err = FAILED; 7491 bool outstanding; 7492 u32 reg; 7493 7494 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); 7495 7496 ufshcd_hold(hba); 7497 7498 if (!is_mcq_enabled(hba)) { 7499 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 7500 if (!test_bit(tag, &hba->outstanding_reqs)) { 7501 /* If command is already aborted/completed, return FAILED. */ 7502 dev_err(hba->dev, 7503 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", 7504 __func__, tag, hba->outstanding_reqs, reg); 7505 goto release; 7506 } 7507 } 7508 7509 /* Print Transfer Request of aborted task */ 7510 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); 7511 7512 /* 7513 * Print detailed info about aborted request. 
7514 * As more than one request might get aborted at the same time, 7515 * print full information only for the first aborted request in order 7516 * to reduce repeated printouts. For other aborted requests only print 7517 * basic details. 7518 */ 7519 scsi_print_command(cmd); 7520 if (!hba->req_abort_count) { 7521 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); 7522 ufshcd_print_evt_hist(hba); 7523 ufshcd_print_host_state(hba); 7524 ufshcd_print_pwr_info(hba); 7525 ufshcd_print_tr(hba, tag, true); 7526 } else { 7527 ufshcd_print_tr(hba, tag, false); 7528 } 7529 hba->req_abort_count++; 7530 7531 if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) { 7532 /* only execute this code in single doorbell mode */ 7533 dev_err(hba->dev, 7534 "%s: cmd was completed, but without a notifying intr, tag = %d", 7535 __func__, tag); 7536 __ufshcd_transfer_req_compl(hba, 1UL << tag); 7537 goto release; 7538 } 7539 7540 /* 7541 * Task abort to the device W-LUN is illegal. When this command 7542 * will fail, due to spec violation, scsi err handling next step 7543 * will be to send LU reset which, again, is a spec violation. 7544 * To avoid these unnecessary/illegal steps, first we clean up 7545 * the lrb taken by this cmd and re-set it in outstanding_reqs, 7546 * then queue the eh_work and bail. 7547 */ 7548 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { 7549 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); 7550 7551 spin_lock_irqsave(host->host_lock, flags); 7552 hba->force_reset = true; 7553 ufshcd_schedule_eh_work(hba); 7554 spin_unlock_irqrestore(host->host_lock, flags); 7555 goto release; 7556 } 7557 7558 if (is_mcq_enabled(hba)) { 7559 /* MCQ mode. Branch off to handle abort for mcq mode */ 7560 err = ufshcd_mcq_abort(cmd); 7561 goto release; 7562 } 7563 7564 /* Skip task abort in case previous aborts failed and report failure */ 7565 if (lrbp->req_abort_skip) { 7566 dev_err(hba->dev, "%s: skipping abort\n", __func__); 7567 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); 7568 goto release; 7569 } 7570 7571 err = ufshcd_try_to_abort_task(hba, tag); 7572 if (err) { 7573 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); 7574 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); 7575 err = FAILED; 7576 goto release; 7577 } 7578 7579 /* 7580 * Clear the corresponding bit from outstanding_reqs since the command 7581 * has been aborted successfully. 7582 */ 7583 spin_lock_irqsave(&hba->outstanding_lock, flags); 7584 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); 7585 spin_unlock_irqrestore(&hba->outstanding_lock, flags); 7586 7587 if (outstanding) 7588 ufshcd_release_scsi_cmd(hba, lrbp); 7589 7590 err = SUCCESS; 7591 7592 release: 7593 /* Matches the ufshcd_hold() call at the start of this function. */ 7594 ufshcd_release(hba); 7595 return err; 7596 } 7597 7598 /** 7599 * ufshcd_host_reset_and_restore - reset and restore host controller 7600 * @hba: per-adapter instance 7601 * 7602 * Note that host controller reset may issue DME_RESET to 7603 * local and remote (device) Uni-Pro stack and the attributes 7604 * are reset to default state. 7605 * 7606 * Return: zero on success, non-zero on failure. 
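 *
 * After stopping the controller and completing the requests it had already
 * cleared, the clocks are scaled up to their maximum frequency, the controller
 * is re-enabled and ufshcd_probe_hba() is run again (without re-reading the
 * device parameters) to bring the link and the device back to an operational
 * state.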
7607 */ 7608 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) 7609 { 7610 int err; 7611 7612 /* 7613 * Stop the host controller and complete the requests 7614 * cleared by h/w 7615 */ 7616 ufshcd_hba_stop(hba); 7617 hba->silence_err_logs = true; 7618 ufshcd_complete_requests(hba, true); 7619 hba->silence_err_logs = false; 7620 7621 /* scale up clocks to max frequency before full reinitialization */ 7622 ufshcd_scale_clks(hba, true); 7623 7624 err = ufshcd_hba_enable(hba); 7625 7626 /* Establish the link again and restore the device */ 7627 if (!err) 7628 err = ufshcd_probe_hba(hba, false); 7629 7630 if (err) 7631 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); 7632 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); 7633 return err; 7634 } 7635 7636 /** 7637 * ufshcd_reset_and_restore - reset and re-initialize host/device 7638 * @hba: per-adapter instance 7639 * 7640 * Reset and recover device, host and re-establish link. This 7641 * is helpful to recover the communication in fatal error conditions. 7642 * 7643 * Return: zero on success, non-zero on failure. 7644 */ 7645 static int ufshcd_reset_and_restore(struct ufs_hba *hba) 7646 { 7647 u32 saved_err = 0; 7648 u32 saved_uic_err = 0; 7649 int err = 0; 7650 unsigned long flags; 7651 int retries = MAX_HOST_RESET_RETRIES; 7652 7653 spin_lock_irqsave(hba->host->host_lock, flags); 7654 do { 7655 /* 7656 * This is a fresh start, cache and clear saved error first, 7657 * in case new error generated during reset and restore. 7658 */ 7659 saved_err |= hba->saved_err; 7660 saved_uic_err |= hba->saved_uic_err; 7661 hba->saved_err = 0; 7662 hba->saved_uic_err = 0; 7663 hba->force_reset = false; 7664 hba->ufshcd_state = UFSHCD_STATE_RESET; 7665 spin_unlock_irqrestore(hba->host->host_lock, flags); 7666 7667 /* Reset the attached device */ 7668 ufshcd_device_reset(hba); 7669 7670 err = ufshcd_host_reset_and_restore(hba); 7671 7672 spin_lock_irqsave(hba->host->host_lock, flags); 7673 if (err) 7674 continue; 7675 /* Do not exit unless operational or dead */ 7676 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && 7677 hba->ufshcd_state != UFSHCD_STATE_ERROR && 7678 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL) 7679 err = -EAGAIN; 7680 } while (err && --retries); 7681 7682 /* 7683 * Inform scsi mid-layer that we did reset and allow to handle 7684 * Unit Attention properly. 7685 */ 7686 scsi_report_bus_reset(hba->host, 0); 7687 if (err) { 7688 hba->ufshcd_state = UFSHCD_STATE_ERROR; 7689 hba->saved_err |= saved_err; 7690 hba->saved_uic_err |= saved_uic_err; 7691 } 7692 spin_unlock_irqrestore(hba->host->host_lock, flags); 7693 7694 return err; 7695 } 7696 7697 /** 7698 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer 7699 * @cmd: SCSI command pointer 7700 * 7701 * Return: SUCCESS or FAILED. 
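 *
 * The handler only requests a full reset: it sets force_reset, schedules the
 * error handler work and waits for it to finish, then reports FAILED if the
 * controller ended up in the error state.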
7702 */ 7703 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) 7704 { 7705 int err = SUCCESS; 7706 unsigned long flags; 7707 struct ufs_hba *hba; 7708 7709 hba = shost_priv(cmd->device->host); 7710 7711 spin_lock_irqsave(hba->host->host_lock, flags); 7712 hba->force_reset = true; 7713 ufshcd_schedule_eh_work(hba); 7714 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); 7715 spin_unlock_irqrestore(hba->host->host_lock, flags); 7716 7717 flush_work(&hba->eh_work); 7718 7719 spin_lock_irqsave(hba->host->host_lock, flags); 7720 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) 7721 err = FAILED; 7722 spin_unlock_irqrestore(hba->host->host_lock, flags); 7723 7724 return err; 7725 } 7726 7727 /** 7728 * ufshcd_get_max_icc_level - calculate the ICC level 7729 * @sup_curr_uA: max. current supported by the regulator 7730 * @start_scan: row at the desc table to start scan from 7731 * @buff: power descriptor buffer 7732 * 7733 * Return: calculated max ICC level for specific regulator. 7734 */ 7735 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, 7736 const char *buff) 7737 { 7738 int i; 7739 int curr_uA; 7740 u16 data; 7741 u16 unit; 7742 7743 for (i = start_scan; i >= 0; i--) { 7744 data = get_unaligned_be16(&buff[2 * i]); 7745 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> 7746 ATTR_ICC_LVL_UNIT_OFFSET; 7747 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; 7748 switch (unit) { 7749 case UFSHCD_NANO_AMP: 7750 curr_uA = curr_uA / 1000; 7751 break; 7752 case UFSHCD_MILI_AMP: 7753 curr_uA = curr_uA * 1000; 7754 break; 7755 case UFSHCD_AMP: 7756 curr_uA = curr_uA * 1000 * 1000; 7757 break; 7758 case UFSHCD_MICRO_AMP: 7759 default: 7760 break; 7761 } 7762 if (sup_curr_uA >= curr_uA) 7763 break; 7764 } 7765 if (i < 0) { 7766 i = 0; 7767 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i); 7768 } 7769 7770 return (u32)i; 7771 } 7772 7773 /** 7774 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level 7775 * In case regulators are not initialized we'll return 0 7776 * @hba: per-adapter instance 7777 * @desc_buf: power descriptor buffer to extract ICC levels from. 7778 * 7779 * Return: calculated ICC level. 7780 */ 7781 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, 7782 const u8 *desc_buf) 7783 { 7784 u32 icc_level = 0; 7785 7786 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || 7787 !hba->vreg_info.vccq2) { 7788 /* 7789 * Using dev_dbg to avoid messages during runtime PM to avoid 7790 * never-ending cycles of messages written back to storage by 7791 * user space causing runtime resume, causing more messages and 7792 * so on. 
7793 */ 7794 dev_dbg(hba->dev, 7795 "%s: Regulator capability was not set, actvIccLevel=%d", 7796 __func__, icc_level); 7797 goto out; 7798 } 7799 7800 if (hba->vreg_info.vcc->max_uA) 7801 icc_level = ufshcd_get_max_icc_level( 7802 hba->vreg_info.vcc->max_uA, 7803 POWER_DESC_MAX_ACTV_ICC_LVLS - 1, 7804 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); 7805 7806 if (hba->vreg_info.vccq->max_uA) 7807 icc_level = ufshcd_get_max_icc_level( 7808 hba->vreg_info.vccq->max_uA, 7809 icc_level, 7810 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); 7811 7812 if (hba->vreg_info.vccq2->max_uA) 7813 icc_level = ufshcd_get_max_icc_level( 7814 hba->vreg_info.vccq2->max_uA, 7815 icc_level, 7816 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); 7817 out: 7818 return icc_level; 7819 } 7820 7821 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) 7822 { 7823 int ret; 7824 u8 *desc_buf; 7825 u32 icc_level; 7826 7827 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 7828 if (!desc_buf) 7829 return; 7830 7831 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, 7832 desc_buf, QUERY_DESC_MAX_SIZE); 7833 if (ret) { 7834 dev_err(hba->dev, 7835 "%s: Failed reading power descriptor ret = %d", 7836 __func__, ret); 7837 goto out; 7838 } 7839 7840 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf); 7841 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); 7842 7843 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 7844 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level); 7845 7846 if (ret) 7847 dev_err(hba->dev, 7848 "%s: Failed configuring bActiveICCLevel = %d ret = %d", 7849 __func__, icc_level, ret); 7850 7851 out: 7852 kfree(desc_buf); 7853 } 7854 7855 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev) 7856 { 7857 scsi_autopm_get_device(sdev); 7858 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev); 7859 if (sdev->rpm_autosuspend) 7860 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev, 7861 RPM_AUTOSUSPEND_DELAY_MS); 7862 scsi_autopm_put_device(sdev); 7863 } 7864 7865 /** 7866 * ufshcd_scsi_add_wlus - Adds required W-LUs 7867 * @hba: per-adapter instance 7868 * 7869 * UFS device specification requires the UFS devices to support 4 well known 7870 * logical units: 7871 * "REPORT_LUNS" (address: 01h) 7872 * "UFS Device" (address: 50h) 7873 * "RPMB" (address: 44h) 7874 * "BOOT" (address: 30h) 7875 * UFS device's power management needs to be controlled by "POWER CONDITION" 7876 * field of SSU (START STOP UNIT) command. But this "power condition" field 7877 * will take effect only when its sent to "UFS device" well known logical unit 7878 * hence we require the scsi_device instance to represent this logical unit in 7879 * order for the UFS host driver to send the SSU command for power management. 7880 * 7881 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory 7882 * Block) LU so user space process can control this LU. User space may also 7883 * want to have access to BOOT LU. 7884 * 7885 * This function adds scsi device instances for each of all well known LUs 7886 * (except "REPORT LUNS" LU). 7887 * 7888 * Return: zero on success (all required W-LUs are added successfully), 7889 * non-zero error value on failure (if failed to add any of the required W-LU). 
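 *
 * The "UFS Device" W-LU is added first since it is needed for the SSU based
 * power management; a failure to add the "RPMB" W-LU removes it again, whereas
 * a missing "BOOT" W-LU is only reported and otherwise ignored.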
7890 */ 7891 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) 7892 { 7893 int ret = 0; 7894 struct scsi_device *sdev_boot, *sdev_rpmb; 7895 7896 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0, 7897 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); 7898 if (IS_ERR(hba->ufs_device_wlun)) { 7899 ret = PTR_ERR(hba->ufs_device_wlun); 7900 hba->ufs_device_wlun = NULL; 7901 goto out; 7902 } 7903 scsi_device_put(hba->ufs_device_wlun); 7904 7905 sdev_rpmb = __scsi_add_device(hba->host, 0, 0, 7906 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); 7907 if (IS_ERR(sdev_rpmb)) { 7908 ret = PTR_ERR(sdev_rpmb); 7909 goto remove_ufs_device_wlun; 7910 } 7911 ufshcd_blk_pm_runtime_init(sdev_rpmb); 7912 scsi_device_put(sdev_rpmb); 7913 7914 sdev_boot = __scsi_add_device(hba->host, 0, 0, 7915 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); 7916 if (IS_ERR(sdev_boot)) { 7917 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); 7918 } else { 7919 ufshcd_blk_pm_runtime_init(sdev_boot); 7920 scsi_device_put(sdev_boot); 7921 } 7922 goto out; 7923 7924 remove_ufs_device_wlun: 7925 scsi_remove_device(hba->ufs_device_wlun); 7926 out: 7927 return ret; 7928 } 7929 7930 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) 7931 { 7932 struct ufs_dev_info *dev_info = &hba->dev_info; 7933 u8 lun; 7934 u32 d_lu_wb_buf_alloc; 7935 u32 ext_ufs_feature; 7936 7937 if (!ufshcd_is_wb_allowed(hba)) 7938 return; 7939 7940 /* 7941 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or 7942 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES 7943 * enabled 7944 */ 7945 if (!(dev_info->wspecversion >= 0x310 || 7946 dev_info->wspecversion == 0x220 || 7947 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) 7948 goto wb_disabled; 7949 7950 ext_ufs_feature = get_unaligned_be32(desc_buf + 7951 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 7952 7953 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP)) 7954 goto wb_disabled; 7955 7956 /* 7957 * WB may be supported but not configured while provisioning. The spec 7958 * says, in dedicated wb buffer mode, a max of 1 lun would have wb 7959 * buffer configured. 
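 * In shared buffer mode the shared allocation units reported in the device
 * descriptor must therefore be non-zero, while in LU dedicated mode the unit
 * descriptors are scanned and the first LU with a non-zero
 * UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS value becomes the dedicated WB LU.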
7960 */ 7961 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE]; 7962 7963 dev_info->b_presrv_uspc_en = 7964 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN]; 7965 7966 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) { 7967 if (!get_unaligned_be32(desc_buf + 7968 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS)) 7969 goto wb_disabled; 7970 } else { 7971 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) { 7972 d_lu_wb_buf_alloc = 0; 7973 ufshcd_read_unit_desc_param(hba, 7974 lun, 7975 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS, 7976 (u8 *)&d_lu_wb_buf_alloc, 7977 sizeof(d_lu_wb_buf_alloc)); 7978 if (d_lu_wb_buf_alloc) { 7979 dev_info->wb_dedicated_lu = lun; 7980 break; 7981 } 7982 } 7983 7984 if (!d_lu_wb_buf_alloc) 7985 goto wb_disabled; 7986 } 7987 7988 if (!ufshcd_is_wb_buf_lifetime_available(hba)) 7989 goto wb_disabled; 7990 7991 return; 7992 7993 wb_disabled: 7994 hba->caps &= ~UFSHCD_CAP_WB_EN; 7995 } 7996 7997 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) 7998 { 7999 struct ufs_dev_info *dev_info = &hba->dev_info; 8000 u32 ext_ufs_feature; 8001 u8 mask = 0; 8002 8003 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300) 8004 return; 8005 8006 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 8007 8008 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF) 8009 mask |= MASK_EE_TOO_LOW_TEMP; 8010 8011 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF) 8012 mask |= MASK_EE_TOO_HIGH_TEMP; 8013 8014 if (mask) { 8015 ufshcd_enable_ee(hba, mask); 8016 ufs_hwmon_probe(hba, mask); 8017 } 8018 } 8019 8020 static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf) 8021 { 8022 struct ufs_dev_info *dev_info = &hba->dev_info; 8023 u32 ext_ufs_feature; 8024 u32 ext_iid_en = 0; 8025 int err; 8026 8027 /* Only UFS-4.0 and above may support EXT_IID */ 8028 if (dev_info->wspecversion < 0x400) 8029 goto out; 8030 8031 ext_ufs_feature = get_unaligned_be32(desc_buf + 8032 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 8033 if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP)) 8034 goto out; 8035 8036 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 8037 QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en); 8038 if (err) 8039 dev_err(hba->dev, "failed reading bEXTIIDEn. 
err = %d\n", err); 8040 8041 out: 8042 dev_info->b_ext_iid_en = ext_iid_en; 8043 } 8044 8045 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, 8046 const struct ufs_dev_quirk *fixups) 8047 { 8048 const struct ufs_dev_quirk *f; 8049 struct ufs_dev_info *dev_info = &hba->dev_info; 8050 8051 if (!fixups) 8052 return; 8053 8054 for (f = fixups; f->quirk; f++) { 8055 if ((f->wmanufacturerid == dev_info->wmanufacturerid || 8056 f->wmanufacturerid == UFS_ANY_VENDOR) && 8057 ((dev_info->model && 8058 STR_PRFX_EQUAL(f->model, dev_info->model)) || 8059 !strcmp(f->model, UFS_ANY_MODEL))) 8060 hba->dev_quirks |= f->quirk; 8061 } 8062 } 8063 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks); 8064 8065 static void ufs_fixup_device_setup(struct ufs_hba *hba) 8066 { 8067 /* fix by general quirk table */ 8068 ufshcd_fixup_dev_quirks(hba, ufs_fixups); 8069 8070 /* allow vendors to fix quirks */ 8071 ufshcd_vops_fixup_dev_quirks(hba); 8072 } 8073 8074 static int ufs_get_device_desc(struct ufs_hba *hba) 8075 { 8076 int err; 8077 u8 model_index; 8078 u8 *desc_buf; 8079 struct ufs_dev_info *dev_info = &hba->dev_info; 8080 8081 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 8082 if (!desc_buf) { 8083 err = -ENOMEM; 8084 goto out; 8085 } 8086 8087 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, 8088 QUERY_DESC_MAX_SIZE); 8089 if (err) { 8090 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", 8091 __func__, err); 8092 goto out; 8093 } 8094 8095 /* 8096 * getting vendor (manufacturerID) and Bank Index in big endian 8097 * format 8098 */ 8099 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | 8100 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; 8101 8102 /* getting Specification Version in big endian format */ 8103 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | 8104 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; 8105 dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH]; 8106 8107 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 8108 8109 err = ufshcd_read_string_desc(hba, model_index, 8110 &dev_info->model, SD_ASCII_STD); 8111 if (err < 0) { 8112 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", 8113 __func__, err); 8114 goto out; 8115 } 8116 8117 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] + 8118 desc_buf[DEVICE_DESC_PARAM_NUM_WLU]; 8119 8120 ufs_fixup_device_setup(hba); 8121 8122 ufshcd_wb_probe(hba, desc_buf); 8123 8124 ufshcd_temp_notif_probe(hba, desc_buf); 8125 8126 if (hba->ext_iid_sup) 8127 ufshcd_ext_iid_probe(hba, desc_buf); 8128 8129 /* 8130 * ufshcd_read_string_desc returns size of the string 8131 * reset the error value 8132 */ 8133 err = 0; 8134 8135 out: 8136 kfree(desc_buf); 8137 return err; 8138 } 8139 8140 static void ufs_put_device_desc(struct ufs_hba *hba) 8141 { 8142 struct ufs_dev_info *dev_info = &hba->dev_info; 8143 8144 kfree(dev_info->model); 8145 dev_info->model = NULL; 8146 } 8147 8148 /** 8149 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro 8150 * @hba: per-adapter instance 8151 * 8152 * PA_TActivate parameter can be tuned manually if UniPro version is less than 8153 * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's 8154 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce 8155 * the hibern8 exit latency. 8156 * 8157 * Return: zero on success, non-zero error value on failure. 
8158 */ 8159 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) 8160 { 8161 int ret = 0; 8162 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; 8163 8164 ret = ufshcd_dme_peer_get(hba, 8165 UIC_ARG_MIB_SEL( 8166 RX_MIN_ACTIVATETIME_CAPABILITY, 8167 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 8168 &peer_rx_min_activatetime); 8169 if (ret) 8170 goto out; 8171 8172 /* make sure proper unit conversion is applied */ 8173 tuned_pa_tactivate = 8174 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) 8175 / PA_TACTIVATE_TIME_UNIT_US); 8176 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8177 tuned_pa_tactivate); 8178 8179 out: 8180 return ret; 8181 } 8182 8183 /** 8184 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro 8185 * @hba: per-adapter instance 8186 * 8187 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than 8188 * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's 8189 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY. 8190 * This optimal value can help reduce the hibern8 exit latency. 8191 * 8192 * Return: zero on success, non-zero error value on failure. 8193 */ 8194 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) 8195 { 8196 int ret = 0; 8197 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; 8198 u32 max_hibern8_time, tuned_pa_hibern8time; 8199 8200 ret = ufshcd_dme_get(hba, 8201 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, 8202 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), 8203 &local_tx_hibern8_time_cap); 8204 if (ret) 8205 goto out; 8206 8207 ret = ufshcd_dme_peer_get(hba, 8208 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, 8209 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 8210 &peer_rx_hibern8_time_cap); 8211 if (ret) 8212 goto out; 8213 8214 max_hibern8_time = max(local_tx_hibern8_time_cap, 8215 peer_rx_hibern8_time_cap); 8216 /* make sure proper unit conversion is applied */ 8217 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) 8218 / PA_HIBERN8_TIME_UNIT_US); 8219 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 8220 tuned_pa_hibern8time); 8221 out: 8222 return ret; 8223 } 8224 8225 /** 8226 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is 8227 * less than device PA_TACTIVATE time. 8228 * @hba: per-adapter instance 8229 * 8230 * Some UFS devices require host PA_TACTIVATE to be lower than device 8231 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk 8232 * for such devices. 8233 * 8234 * Return: zero on success, non-zero error value on failure. 
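 *
 * Worked example (hypothetical values): with host PA_GRANULARITY = 1 (1 us per
 * unit) and host PA_TACTIVATE = 64, the host time is 64 us; with device
 * PA_GRANULARITY = 4 (16 us per unit) and device PA_TACTIVATE = 3, the device
 * time is 48 us. Since 64 us >= 48 us, the device value is raised to
 * 64 / 16 + 1 = 5 units, i.e. 80 us, keeping it above the host time.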
8235 */ 8236 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) 8237 { 8238 int ret = 0; 8239 u32 granularity, peer_granularity; 8240 u32 pa_tactivate, peer_pa_tactivate; 8241 u32 pa_tactivate_us, peer_pa_tactivate_us; 8242 static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100}; 8243 8244 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), 8245 &granularity); 8246 if (ret) 8247 goto out; 8248 8249 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), 8250 &peer_granularity); 8251 if (ret) 8252 goto out; 8253 8254 if ((granularity < PA_GRANULARITY_MIN_VAL) || 8255 (granularity > PA_GRANULARITY_MAX_VAL)) { 8256 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", 8257 __func__, granularity); 8258 return -EINVAL; 8259 } 8260 8261 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) || 8262 (peer_granularity > PA_GRANULARITY_MAX_VAL)) { 8263 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", 8264 __func__, peer_granularity); 8265 return -EINVAL; 8266 } 8267 8268 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); 8269 if (ret) 8270 goto out; 8271 8272 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), 8273 &peer_pa_tactivate); 8274 if (ret) 8275 goto out; 8276 8277 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1]; 8278 peer_pa_tactivate_us = peer_pa_tactivate * 8279 gran_to_us_table[peer_granularity - 1]; 8280 8281 if (pa_tactivate_us >= peer_pa_tactivate_us) { 8282 u32 new_peer_pa_tactivate; 8283 8284 new_peer_pa_tactivate = pa_tactivate_us / 8285 gran_to_us_table[peer_granularity - 1]; 8286 new_peer_pa_tactivate++; 8287 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8288 new_peer_pa_tactivate); 8289 } 8290 8291 out: 8292 return ret; 8293 } 8294 8295 static void ufshcd_tune_unipro_params(struct ufs_hba *hba) 8296 { 8297 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { 8298 ufshcd_tune_pa_tactivate(hba); 8299 ufshcd_tune_pa_hibern8time(hba); 8300 } 8301 8302 ufshcd_vops_apply_dev_quirks(hba); 8303 8304 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) 8305 /* set 1ms timeout for PA_TACTIVATE */ 8306 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); 8307 8308 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) 8309 ufshcd_quirk_tune_host_pa_tactivate(hba); 8310 } 8311 8312 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) 8313 { 8314 hba->ufs_stats.hibern8_exit_cnt = 0; 8315 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 8316 hba->req_abort_count = 0; 8317 } 8318 8319 static int ufshcd_device_geo_params_init(struct ufs_hba *hba) 8320 { 8321 int err; 8322 u8 *desc_buf; 8323 8324 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); 8325 if (!desc_buf) { 8326 err = -ENOMEM; 8327 goto out; 8328 } 8329 8330 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, 8331 desc_buf, QUERY_DESC_MAX_SIZE); 8332 if (err) { 8333 dev_err(hba->dev, "%s: Failed reading Geometry Desc. 
err = %d\n", 8334 __func__, err); 8335 goto out; 8336 } 8337 8338 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1) 8339 hba->dev_info.max_lu_supported = 32; 8340 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) 8341 hba->dev_info.max_lu_supported = 8; 8342 8343 out: 8344 kfree(desc_buf); 8345 return err; 8346 } 8347 8348 struct ufs_ref_clk { 8349 unsigned long freq_hz; 8350 enum ufs_ref_clk_freq val; 8351 }; 8352 8353 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = { 8354 {19200000, REF_CLK_FREQ_19_2_MHZ}, 8355 {26000000, REF_CLK_FREQ_26_MHZ}, 8356 {38400000, REF_CLK_FREQ_38_4_MHZ}, 8357 {52000000, REF_CLK_FREQ_52_MHZ}, 8358 {0, REF_CLK_FREQ_INVAL}, 8359 }; 8360 8361 static enum ufs_ref_clk_freq 8362 ufs_get_bref_clk_from_hz(unsigned long freq) 8363 { 8364 int i; 8365 8366 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++) 8367 if (ufs_ref_clk_freqs[i].freq_hz == freq) 8368 return ufs_ref_clk_freqs[i].val; 8369 8370 return REF_CLK_FREQ_INVAL; 8371 } 8372 8373 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) 8374 { 8375 unsigned long freq; 8376 8377 freq = clk_get_rate(refclk); 8378 8379 hba->dev_ref_clk_freq = 8380 ufs_get_bref_clk_from_hz(freq); 8381 8382 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) 8383 dev_err(hba->dev, 8384 "invalid ref_clk setting = %ld\n", freq); 8385 } 8386 8387 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) 8388 { 8389 int err; 8390 u32 ref_clk; 8391 u32 freq = hba->dev_ref_clk_freq; 8392 8393 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 8394 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk); 8395 8396 if (err) { 8397 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", 8398 err); 8399 goto out; 8400 } 8401 8402 if (ref_clk == freq) 8403 goto out; /* nothing to update */ 8404 8405 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, 8406 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq); 8407 8408 if (err) { 8409 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", 8410 ufs_ref_clk_freqs[freq].freq_hz); 8411 goto out; 8412 } 8413 8414 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", 8415 ufs_ref_clk_freqs[freq].freq_hz); 8416 8417 out: 8418 return err; 8419 } 8420 8421 static int ufshcd_device_params_init(struct ufs_hba *hba) 8422 { 8423 bool flag; 8424 int ret; 8425 8426 /* Init UFS geometry descriptor related parameters */ 8427 ret = ufshcd_device_geo_params_init(hba); 8428 if (ret) 8429 goto out; 8430 8431 /* Check and apply UFS device quirks */ 8432 ret = ufs_get_device_desc(hba); 8433 if (ret) { 8434 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", 8435 __func__, ret); 8436 goto out; 8437 } 8438 8439 ufshcd_get_ref_clk_gating_wait(hba); 8440 8441 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, 8442 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag)) 8443 hba->dev_info.f_power_on_wp_en = flag; 8444 8445 /* Probe maximum power mode co-supported by both UFS host and device */ 8446 if (ufshcd_get_max_pwr_mode(hba)) 8447 dev_err(hba->dev, 8448 "%s: Failed getting max supported power mode\n", 8449 __func__); 8450 out: 8451 return ret; 8452 } 8453 8454 /** 8455 * ufshcd_add_lus - probe and add UFS logical units 8456 * @hba: per-adapter instance 8457 * 8458 * Return: 0 upon success; < 0 upon failure. 
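 *
 * Besides adding the well known logical units, this also sets up devfreq based
 * clock scaling (when supported), registers the BSG node and kicks off the
 * SCSI scan before dropping the runtime PM reference.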
8459 */ 8460 static int ufshcd_add_lus(struct ufs_hba *hba) 8461 { 8462 int ret; 8463 8464 /* Add required well known logical units to scsi mid layer */ 8465 ret = ufshcd_scsi_add_wlus(hba); 8466 if (ret) 8467 goto out; 8468 8469 /* Initialize devfreq after UFS device is detected */ 8470 if (ufshcd_is_clkscaling_supported(hba)) { 8471 memcpy(&hba->clk_scaling.saved_pwr_info, 8472 &hba->pwr_info, 8473 sizeof(struct ufs_pa_layer_attr)); 8474 hba->clk_scaling.is_allowed = true; 8475 8476 ret = ufshcd_devfreq_init(hba); 8477 if (ret) 8478 goto out; 8479 8480 hba->clk_scaling.is_enabled = true; 8481 ufshcd_init_clk_scaling_sysfs(hba); 8482 } 8483 8484 ufs_bsg_probe(hba); 8485 scsi_scan_host(hba->host); 8486 pm_runtime_put_sync(hba->dev); 8487 8488 out: 8489 return ret; 8490 } 8491 8492 /* SDB - Single Doorbell */ 8493 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs) 8494 { 8495 size_t ucdl_size, utrdl_size; 8496 8497 ucdl_size = ufshcd_get_ucd_size(hba) * nutrs; 8498 dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr, 8499 hba->ucdl_dma_addr); 8500 8501 utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs; 8502 dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr, 8503 hba->utrdl_dma_addr); 8504 8505 devm_kfree(hba->dev, hba->lrb); 8506 } 8507 8508 static int ufshcd_alloc_mcq(struct ufs_hba *hba) 8509 { 8510 int ret; 8511 int old_nutrs = hba->nutrs; 8512 8513 ret = ufshcd_mcq_decide_queue_depth(hba); 8514 if (ret < 0) 8515 return ret; 8516 8517 hba->nutrs = ret; 8518 ret = ufshcd_mcq_init(hba); 8519 if (ret) 8520 goto err; 8521 8522 /* 8523 * Previously allocated memory for nutrs may not be enough in MCQ mode. 8524 * Number of supported tags in MCQ mode may be larger than SDB mode. 8525 */ 8526 if (hba->nutrs != old_nutrs) { 8527 ufshcd_release_sdb_queue(hba, old_nutrs); 8528 ret = ufshcd_memory_alloc(hba); 8529 if (ret) 8530 goto err; 8531 ufshcd_host_memory_configure(hba); 8532 } 8533 8534 ret = ufshcd_mcq_memory_alloc(hba); 8535 if (ret) 8536 goto err; 8537 8538 return 0; 8539 err: 8540 hba->nutrs = old_nutrs; 8541 return ret; 8542 } 8543 8544 static void ufshcd_config_mcq(struct ufs_hba *hba) 8545 { 8546 int ret; 8547 u32 intrs; 8548 8549 ret = ufshcd_mcq_vops_config_esi(hba); 8550 dev_info(hba->dev, "ESI %sconfigured\n", ret ? 
"is not " : ""); 8551 8552 intrs = UFSHCD_ENABLE_MCQ_INTRS; 8553 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR) 8554 intrs &= ~MCQ_CQ_EVENT_STATUS; 8555 ufshcd_enable_intr(hba, intrs); 8556 ufshcd_mcq_make_queues_operational(hba); 8557 ufshcd_mcq_config_mac(hba, hba->nutrs); 8558 8559 hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; 8560 hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED; 8561 8562 /* Select MCQ mode */ 8563 ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1, 8564 REG_UFS_MEM_CFG); 8565 hba->mcq_enabled = true; 8566 8567 dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n", 8568 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT], 8569 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL], 8570 hba->nutrs); 8571 } 8572 8573 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) 8574 { 8575 int ret; 8576 struct Scsi_Host *host = hba->host; 8577 8578 hba->ufshcd_state = UFSHCD_STATE_RESET; 8579 8580 ret = ufshcd_link_startup(hba); 8581 if (ret) 8582 return ret; 8583 8584 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) 8585 return ret; 8586 8587 /* Debug counters initialization */ 8588 ufshcd_clear_dbg_ufs_stats(hba); 8589 8590 /* UniPro link is active now */ 8591 ufshcd_set_link_active(hba); 8592 8593 /* Reconfigure MCQ upon reset */ 8594 if (is_mcq_enabled(hba) && !init_dev_params) 8595 ufshcd_config_mcq(hba); 8596 8597 /* Verify device initialization by sending NOP OUT UPIU */ 8598 ret = ufshcd_verify_dev_init(hba); 8599 if (ret) 8600 return ret; 8601 8602 /* Initiate UFS initialization, and waiting until completion */ 8603 ret = ufshcd_complete_dev_init(hba); 8604 if (ret) 8605 return ret; 8606 8607 /* 8608 * Initialize UFS device parameters used by driver, these 8609 * parameters are associated with UFS descriptors. 8610 */ 8611 if (init_dev_params) { 8612 ret = ufshcd_device_params_init(hba); 8613 if (ret) 8614 return ret; 8615 if (is_mcq_supported(hba) && !hba->scsi_host_added) { 8616 ret = ufshcd_alloc_mcq(hba); 8617 if (!ret) { 8618 ufshcd_config_mcq(hba); 8619 } else { 8620 /* Continue with SDB mode */ 8621 use_mcq_mode = false; 8622 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", 8623 ret); 8624 } 8625 ret = scsi_add_host(host, hba->dev); 8626 if (ret) { 8627 dev_err(hba->dev, "scsi_add_host failed\n"); 8628 return ret; 8629 } 8630 hba->scsi_host_added = true; 8631 } else if (is_mcq_supported(hba)) { 8632 /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */ 8633 ufshcd_config_mcq(hba); 8634 } 8635 } 8636 8637 ufshcd_tune_unipro_params(hba); 8638 8639 /* UFS device is also active now */ 8640 ufshcd_set_ufs_dev_active(hba); 8641 ufshcd_force_reset_auto_bkops(hba); 8642 8643 /* Gear up to HS gear if supported */ 8644 if (hba->max_pwr_info.is_valid) { 8645 /* 8646 * Set the right value to bRefClkFreq before attempting to 8647 * switch to HS gears. 8648 */ 8649 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) 8650 ufshcd_set_dev_ref_clk(hba); 8651 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); 8652 if (ret) { 8653 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", 8654 __func__, ret); 8655 return ret; 8656 } 8657 } 8658 8659 return 0; 8660 } 8661 8662 /** 8663 * ufshcd_probe_hba - probe hba to detect device and initialize it 8664 * @hba: per-adapter instance 8665 * @init_dev_params: whether or not to call ufshcd_device_params_init(). 
8666 * 8667 * Execute link-startup and verify device initialization 8668 * 8669 * Return: 0 upon success; < 0 upon failure. 8670 */ 8671 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) 8672 { 8673 ktime_t start = ktime_get(); 8674 unsigned long flags; 8675 int ret; 8676 8677 ret = ufshcd_device_init(hba, init_dev_params); 8678 if (ret) 8679 goto out; 8680 8681 if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { 8682 /* Reset the device and controller before doing reinit */ 8683 ufshcd_device_reset(hba); 8684 ufshcd_hba_stop(hba); 8685 ufshcd_vops_reinit_notify(hba); 8686 ret = ufshcd_hba_enable(hba); 8687 if (ret) { 8688 dev_err(hba->dev, "Host controller enable failed\n"); 8689 ufshcd_print_evt_hist(hba); 8690 ufshcd_print_host_state(hba); 8691 goto out; 8692 } 8693 8694 /* Reinit the device */ 8695 ret = ufshcd_device_init(hba, init_dev_params); 8696 if (ret) 8697 goto out; 8698 } 8699 8700 ufshcd_print_pwr_info(hba); 8701 8702 /* 8703 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec) 8704 * and for removable UFS card as well, hence always set the parameter. 8705 * Note: Error handler may issue the device reset hence resetting 8706 * bActiveICCLevel as well so it is always safe to set this here. 8707 */ 8708 ufshcd_set_active_icc_lvl(hba); 8709 8710 /* Enable UFS Write Booster if supported */ 8711 ufshcd_configure_wb(hba); 8712 8713 if (hba->ee_usr_mask) 8714 ufshcd_write_ee_control(hba); 8715 /* Enable Auto-Hibernate if configured */ 8716 ufshcd_auto_hibern8_enable(hba); 8717 8718 out: 8719 spin_lock_irqsave(hba->host->host_lock, flags); 8720 if (ret) 8721 hba->ufshcd_state = UFSHCD_STATE_ERROR; 8722 else if (hba->ufshcd_state == UFSHCD_STATE_RESET) 8723 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 8724 spin_unlock_irqrestore(hba->host->host_lock, flags); 8725 8726 trace_ufshcd_init(dev_name(hba->dev), ret, 8727 ktime_to_us(ktime_sub(ktime_get(), start)), 8728 hba->curr_dev_pwr_mode, hba->uic_link_state); 8729 return ret; 8730 } 8731 8732 /** 8733 * ufshcd_async_scan - asynchronous execution for probing hba 8734 * @data: data pointer to pass to this function 8735 * @cookie: cookie data 8736 */ 8737 static void ufshcd_async_scan(void *data, async_cookie_t cookie) 8738 { 8739 struct ufs_hba *hba = (struct ufs_hba *)data; 8740 int ret; 8741 8742 down(&hba->host_sem); 8743 /* Initialize hba, detect and initialize UFS device */ 8744 ret = ufshcd_probe_hba(hba, true); 8745 up(&hba->host_sem); 8746 if (ret) 8747 goto out; 8748 8749 /* Probe and add UFS logical units */ 8750 ret = ufshcd_add_lus(hba); 8751 out: 8752 /* 8753 * If we failed to initialize the device or the device is not 8754 * present, turn off the power/clocks etc. 8755 */ 8756 if (ret) { 8757 pm_runtime_put_sync(hba->dev); 8758 ufshcd_hba_exit(hba); 8759 } 8760 } 8761 8762 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd) 8763 { 8764 struct ufs_hba *hba = shost_priv(scmd->device->host); 8765 8766 if (!hba->system_suspending) { 8767 /* Activate the error handler in the SCSI core. */ 8768 return SCSI_EH_NOT_HANDLED; 8769 } 8770 8771 /* 8772 * If we get here we know that no TMFs are outstanding and also that 8773 * the only pending command is a START STOP UNIT command. Handle the 8774 * timeout of that command directly to prevent a deadlock between 8775 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler(). 
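 *
 * Link recovery is therefore invoked directly below; SCSI_EH_RESET_TIMER is
 * returned while requests are still outstanding so that the SCSI core retries
 * the timeout later, otherwise the timeout is reported as handled.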
8776 */ 8777 ufshcd_link_recovery(hba); 8778 dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n", 8779 __func__, hba->outstanding_tasks); 8780 8781 return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; 8782 } 8783 8784 static const struct attribute_group *ufshcd_driver_groups[] = { 8785 &ufs_sysfs_unit_descriptor_group, 8786 &ufs_sysfs_lun_attributes_group, 8787 NULL, 8788 }; 8789 8790 static struct ufs_hba_variant_params ufs_hba_vps = { 8791 .hba_enable_delay_us = 1000, 8792 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40), 8793 .devfreq_profile.polling_ms = 100, 8794 .devfreq_profile.target = ufshcd_devfreq_target, 8795 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status, 8796 .ondemand_data.upthreshold = 70, 8797 .ondemand_data.downdifferential = 5, 8798 }; 8799 8800 static const struct scsi_host_template ufshcd_driver_template = { 8801 .module = THIS_MODULE, 8802 .name = UFSHCD, 8803 .proc_name = UFSHCD, 8804 .map_queues = ufshcd_map_queues, 8805 .queuecommand = ufshcd_queuecommand, 8806 .mq_poll = ufshcd_poll, 8807 .slave_alloc = ufshcd_slave_alloc, 8808 .slave_configure = ufshcd_slave_configure, 8809 .slave_destroy = ufshcd_slave_destroy, 8810 .change_queue_depth = ufshcd_change_queue_depth, 8811 .eh_abort_handler = ufshcd_abort, 8812 .eh_device_reset_handler = ufshcd_eh_device_reset_handler, 8813 .eh_host_reset_handler = ufshcd_eh_host_reset_handler, 8814 .eh_timed_out = ufshcd_eh_timed_out, 8815 .this_id = -1, 8816 .sg_tablesize = SG_ALL, 8817 .cmd_per_lun = UFSHCD_CMD_PER_LUN, 8818 .can_queue = UFSHCD_CAN_QUEUE, 8819 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX, 8820 .max_sectors = SZ_1M / SECTOR_SIZE, 8821 .max_host_blocked = 1, 8822 .track_queue_depth = 1, 8823 .skip_settle_delay = 1, 8824 .sdev_groups = ufshcd_driver_groups, 8825 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS, 8826 }; 8827 8828 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, 8829 int ua) 8830 { 8831 int ret; 8832 8833 if (!vreg) 8834 return 0; 8835 8836 /* 8837 * "set_load" operation shall be required on those regulators 8838 * which specifically configured current limitation. Otherwise 8839 * zero max_uA may cause unexpected behavior when regulator is 8840 * enabled or set as high power mode. 8841 */ 8842 if (!vreg->max_uA) 8843 return 0; 8844 8845 ret = regulator_set_load(vreg->reg, ua); 8846 if (ret < 0) { 8847 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", 8848 __func__, vreg->name, ua, ret); 8849 } 8850 8851 return ret; 8852 } 8853 8854 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, 8855 struct ufs_vreg *vreg) 8856 { 8857 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); 8858 } 8859 8860 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 8861 struct ufs_vreg *vreg) 8862 { 8863 if (!vreg) 8864 return 0; 8865 8866 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 8867 } 8868 8869 static int ufshcd_config_vreg(struct device *dev, 8870 struct ufs_vreg *vreg, bool on) 8871 { 8872 if (regulator_count_voltages(vreg->reg) <= 0) 8873 return 0; 8874 8875 return ufshcd_config_vreg_load(dev, vreg, on ? 
vreg->max_uA : 0); 8876 } 8877 8878 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) 8879 { 8880 int ret = 0; 8881 8882 if (!vreg || vreg->enabled) 8883 goto out; 8884 8885 ret = ufshcd_config_vreg(dev, vreg, true); 8886 if (!ret) 8887 ret = regulator_enable(vreg->reg); 8888 8889 if (!ret) 8890 vreg->enabled = true; 8891 else 8892 dev_err(dev, "%s: %s enable failed, err=%d\n", 8893 __func__, vreg->name, ret); 8894 out: 8895 return ret; 8896 } 8897 8898 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) 8899 { 8900 int ret = 0; 8901 8902 if (!vreg || !vreg->enabled || vreg->always_on) 8903 goto out; 8904 8905 ret = regulator_disable(vreg->reg); 8906 8907 if (!ret) { 8908 /* ignore errors on applying disable config */ 8909 ufshcd_config_vreg(dev, vreg, false); 8910 vreg->enabled = false; 8911 } else { 8912 dev_err(dev, "%s: %s disable failed, err=%d\n", 8913 __func__, vreg->name, ret); 8914 } 8915 out: 8916 return ret; 8917 } 8918 8919 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) 8920 { 8921 int ret = 0; 8922 struct device *dev = hba->dev; 8923 struct ufs_vreg_info *info = &hba->vreg_info; 8924 8925 ret = ufshcd_toggle_vreg(dev, info->vcc, on); 8926 if (ret) 8927 goto out; 8928 8929 ret = ufshcd_toggle_vreg(dev, info->vccq, on); 8930 if (ret) 8931 goto out; 8932 8933 ret = ufshcd_toggle_vreg(dev, info->vccq2, on); 8934 8935 out: 8936 if (ret) { 8937 ufshcd_toggle_vreg(dev, info->vccq2, false); 8938 ufshcd_toggle_vreg(dev, info->vccq, false); 8939 ufshcd_toggle_vreg(dev, info->vcc, false); 8940 } 8941 return ret; 8942 } 8943 8944 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) 8945 { 8946 struct ufs_vreg_info *info = &hba->vreg_info; 8947 8948 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); 8949 } 8950 8951 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) 8952 { 8953 int ret = 0; 8954 8955 if (!vreg) 8956 goto out; 8957 8958 vreg->reg = devm_regulator_get(dev, vreg->name); 8959 if (IS_ERR(vreg->reg)) { 8960 ret = PTR_ERR(vreg->reg); 8961 dev_err(dev, "%s: %s get failed, err=%d\n", 8962 __func__, vreg->name, ret); 8963 } 8964 out: 8965 return ret; 8966 } 8967 EXPORT_SYMBOL_GPL(ufshcd_get_vreg); 8968 8969 static int ufshcd_init_vreg(struct ufs_hba *hba) 8970 { 8971 int ret = 0; 8972 struct device *dev = hba->dev; 8973 struct ufs_vreg_info *info = &hba->vreg_info; 8974 8975 ret = ufshcd_get_vreg(dev, info->vcc); 8976 if (ret) 8977 goto out; 8978 8979 ret = ufshcd_get_vreg(dev, info->vccq); 8980 if (!ret) 8981 ret = ufshcd_get_vreg(dev, info->vccq2); 8982 out: 8983 return ret; 8984 } 8985 8986 static int ufshcd_init_hba_vreg(struct ufs_hba *hba) 8987 { 8988 struct ufs_vreg_info *info = &hba->vreg_info; 8989 8990 return ufshcd_get_vreg(hba->dev, info->vdd_hba); 8991 } 8992 8993 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) 8994 { 8995 int ret = 0; 8996 struct ufs_clk_info *clki; 8997 struct list_head *head = &hba->clk_list_head; 8998 unsigned long flags; 8999 ktime_t start = ktime_get(); 9000 bool clk_state_changed = false; 9001 9002 if (list_empty(head)) 9003 goto out; 9004 9005 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); 9006 if (ret) 9007 return ret; 9008 9009 list_for_each_entry(clki, head, list) { 9010 if (!IS_ERR_OR_NULL(clki->clk)) { 9011 /* 9012 * Don't disable clocks which are needed 9013 * to keep the link active. 
9014 */ 9015 if (ufshcd_is_link_active(hba) && 9016 clki->keep_link_active) 9017 continue; 9018 9019 clk_state_changed = on ^ clki->enabled; 9020 if (on && !clki->enabled) { 9021 ret = clk_prepare_enable(clki->clk); 9022 if (ret) { 9023 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", 9024 __func__, clki->name, ret); 9025 goto out; 9026 } 9027 } else if (!on && clki->enabled) { 9028 clk_disable_unprepare(clki->clk); 9029 } 9030 clki->enabled = on; 9031 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, 9032 clki->name, on ? "en" : "dis"); 9033 } 9034 } 9035 9036 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); 9037 if (ret) 9038 return ret; 9039 9040 out: 9041 if (ret) { 9042 list_for_each_entry(clki, head, list) { 9043 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) 9044 clk_disable_unprepare(clki->clk); 9045 } 9046 } else if (!ret && on) { 9047 spin_lock_irqsave(hba->host->host_lock, flags); 9048 hba->clk_gating.state = CLKS_ON; 9049 trace_ufshcd_clk_gating(dev_name(hba->dev), 9050 hba->clk_gating.state); 9051 spin_unlock_irqrestore(hba->host->host_lock, flags); 9052 } 9053 9054 if (clk_state_changed) 9055 trace_ufshcd_profile_clk_gating(dev_name(hba->dev), 9056 (on ? "on" : "off"), 9057 ktime_to_us(ktime_sub(ktime_get(), start)), ret); 9058 return ret; 9059 } 9060 9061 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba) 9062 { 9063 u32 freq; 9064 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq); 9065 9066 if (ret) { 9067 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret); 9068 return REF_CLK_FREQ_INVAL; 9069 } 9070 9071 return ufs_get_bref_clk_from_hz(freq); 9072 } 9073 9074 static int ufshcd_init_clocks(struct ufs_hba *hba) 9075 { 9076 int ret = 0; 9077 struct ufs_clk_info *clki; 9078 struct device *dev = hba->dev; 9079 struct list_head *head = &hba->clk_list_head; 9080 9081 if (list_empty(head)) 9082 goto out; 9083 9084 list_for_each_entry(clki, head, list) { 9085 if (!clki->name) 9086 continue; 9087 9088 clki->clk = devm_clk_get(dev, clki->name); 9089 if (IS_ERR(clki->clk)) { 9090 ret = PTR_ERR(clki->clk); 9091 dev_err(dev, "%s: %s clk get failed, %d\n", 9092 __func__, clki->name, ret); 9093 goto out; 9094 } 9095 9096 /* 9097 * Parse device ref clk freq as per device tree "ref_clk". 9098 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL 9099 * in ufshcd_alloc_host(). 
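 * Only the rates listed in ufs_ref_clk_freqs[] (19.2, 26, 38.4 and 52 MHz)
 * map to a valid bRefClkFreq setting; any other rate leaves dev_ref_clk_freq
 * at REF_CLK_FREQ_INVAL and is reported as invalid.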
9100 */ 9101 if (!strcmp(clki->name, "ref_clk")) 9102 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); 9103 9104 if (clki->max_freq) { 9105 ret = clk_set_rate(clki->clk, clki->max_freq); 9106 if (ret) { 9107 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", 9108 __func__, clki->name, 9109 clki->max_freq, ret); 9110 goto out; 9111 } 9112 clki->curr_freq = clki->max_freq; 9113 } 9114 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, 9115 clki->name, clk_get_rate(clki->clk)); 9116 } 9117 out: 9118 return ret; 9119 } 9120 9121 static int ufshcd_variant_hba_init(struct ufs_hba *hba) 9122 { 9123 int err = 0; 9124 9125 if (!hba->vops) 9126 goto out; 9127 9128 err = ufshcd_vops_init(hba); 9129 if (err) 9130 dev_err(hba->dev, "%s: variant %s init failed err %d\n", 9131 __func__, ufshcd_get_var_name(hba), err); 9132 out: 9133 return err; 9134 } 9135 9136 static void ufshcd_variant_hba_exit(struct ufs_hba *hba) 9137 { 9138 if (!hba->vops) 9139 return; 9140 9141 ufshcd_vops_exit(hba); 9142 } 9143 9144 static int ufshcd_hba_init(struct ufs_hba *hba) 9145 { 9146 int err; 9147 9148 /* 9149 * Handle host controller power separately from the UFS device power 9150 * rails as it will help controlling the UFS host controller power 9151 * collapse easily which is different than UFS device power collapse. 9152 * Also, enable the host controller power before we go ahead with rest 9153 * of the initialization here. 9154 */ 9155 err = ufshcd_init_hba_vreg(hba); 9156 if (err) 9157 goto out; 9158 9159 err = ufshcd_setup_hba_vreg(hba, true); 9160 if (err) 9161 goto out; 9162 9163 err = ufshcd_init_clocks(hba); 9164 if (err) 9165 goto out_disable_hba_vreg; 9166 9167 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) 9168 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba); 9169 9170 err = ufshcd_setup_clocks(hba, true); 9171 if (err) 9172 goto out_disable_hba_vreg; 9173 9174 err = ufshcd_init_vreg(hba); 9175 if (err) 9176 goto out_disable_clks; 9177 9178 err = ufshcd_setup_vreg(hba, true); 9179 if (err) 9180 goto out_disable_clks; 9181 9182 err = ufshcd_variant_hba_init(hba); 9183 if (err) 9184 goto out_disable_vreg; 9185 9186 ufs_debugfs_hba_init(hba); 9187 9188 hba->is_powered = true; 9189 goto out; 9190 9191 out_disable_vreg: 9192 ufshcd_setup_vreg(hba, false); 9193 out_disable_clks: 9194 ufshcd_setup_clocks(hba, false); 9195 out_disable_hba_vreg: 9196 ufshcd_setup_hba_vreg(hba, false); 9197 out: 9198 return err; 9199 } 9200 9201 static void ufshcd_hba_exit(struct ufs_hba *hba) 9202 { 9203 if (hba->is_powered) { 9204 ufshcd_exit_clk_scaling(hba); 9205 ufshcd_exit_clk_gating(hba); 9206 if (hba->eh_wq) 9207 destroy_workqueue(hba->eh_wq); 9208 ufs_debugfs_hba_exit(hba); 9209 ufshcd_variant_hba_exit(hba); 9210 ufshcd_setup_vreg(hba, false); 9211 ufshcd_setup_clocks(hba, false); 9212 ufshcd_setup_hba_vreg(hba, false); 9213 hba->is_powered = false; 9214 ufs_put_device_desc(hba); 9215 } 9216 } 9217 9218 static int ufshcd_execute_start_stop(struct scsi_device *sdev, 9219 enum ufs_dev_pwr_mode pwr_mode, 9220 struct scsi_sense_hdr *sshdr) 9221 { 9222 const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 }; 9223 const struct scsi_exec_args args = { 9224 .sshdr = sshdr, 9225 .req_flags = BLK_MQ_REQ_PM, 9226 .scmd_flags = SCMD_FAIL_IF_RECOVERING, 9227 }; 9228 9229 return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL, 9230 /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0, 9231 &args); 9232 } 9233 9234 /** 9235 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device 9236 
* power mode
9237 * @hba: per adapter instance
9238 * @pwr_mode: device power mode to set
9239 *
9240 * Return: 0 if requested power mode is set successfully;
9241 * < 0 if failed to set the requested power mode.
9242 */
9243 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9244 enum ufs_dev_pwr_mode pwr_mode)
9245 {
9246 struct scsi_sense_hdr sshdr;
9247 struct scsi_device *sdp;
9248 unsigned long flags;
9249 int ret, retries;
9250
9251 spin_lock_irqsave(hba->host->host_lock, flags);
9252 sdp = hba->ufs_device_wlun;
9253 if (sdp && scsi_device_online(sdp))
9254 ret = scsi_device_get(sdp);
9255 else
9256 ret = -ENODEV;
9257 spin_unlock_irqrestore(hba->host->host_lock, flags);
9258
9259 if (ret)
9260 return ret;
9261
9262 /*
9263 * If scsi commands fail, the scsi mid-layer schedules scsi error-
9264 * handling, which would wait for the host to be resumed. Since we know
9265 * we are functional while we are here, skip host resume in error
9266 * handling context.
9267 */
9268 hba->host->eh_noresume = 1;
9269
9270 /*
9271 * This function is generally called from the power management
9272 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
9273 * already suspended children.
9274 */
9275 for (retries = 3; retries > 0; --retries) {
9276 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
9277 /*
9278 * scsi_execute() only returns a negative value if the request
9279 * queue is dying.
9280 */
9281 if (ret <= 0)
9282 break;
9283 }
9284 if (ret) {
9285 sdev_printk(KERN_WARNING, sdp,
9286 "START_STOP failed for power mode: %d, result %x\n",
9287 pwr_mode, ret);
9288 if (ret > 0) {
9289 if (scsi_sense_valid(&sshdr))
9290 scsi_print_sense_hdr(sdp, NULL, &sshdr);
9291 ret = -EIO;
9292 }
9293 } else {
9294 hba->curr_dev_pwr_mode = pwr_mode;
9295 }
9296
9297 scsi_device_put(sdp);
9298 hba->host->eh_noresume = 0;
9299 return ret;
9300 }
9301
9302 static int ufshcd_link_state_transition(struct ufs_hba *hba,
9303 enum uic_link_state req_link_state,
9304 bool check_for_bkops)
9305 {
9306 int ret = 0;
9307
9308 if (req_link_state == hba->uic_link_state)
9309 return 0;
9310
9311 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9312 ret = ufshcd_uic_hibern8_enter(hba);
9313 if (!ret) {
9314 ufshcd_set_link_hibern8(hba);
9315 } else {
9316 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9317 __func__, ret);
9318 goto out;
9319 }
9320 }
9321 /*
9322 * If autobkops is enabled, the link can't be turned off because
9323 * turning off the link would also turn off the device, except in the
9324 * case of DeepSleep where the device is expected to remain powered.
9325 */
9326 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
9327 (!check_for_bkops || !hba->auto_bkops_enabled)) {
9328 /*
9329 * Make sure that the link is in low power mode; currently this
9330 * is done by putting the link in Hibern8. Another way to put
9331 * the link in low power mode is to send the DME end point to
9332 * the device and then send the DME reset command to the local
9333 * UniPro. But putting the link in Hibern8 is much faster.
9334 *
9335 * Note also that putting the link in Hibern8 is a requirement
9336 * for entering DeepSleep.
9337 */
9338 ret = ufshcd_uic_hibern8_enter(hba);
9339 if (ret) {
9340 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9341 __func__, ret);
9342 goto out;
9343 }
9344 /*
9345 * Change controller state to "reset state" which
9346 * should also put the link in off/reset state
9347 */
9348 ufshcd_hba_stop(hba);
9349 /*
9350 * TODO: Check if we need any delay to make sure that
9351 * controller is reset
9352 */
9353 ufshcd_set_link_off(hba);
9354 }
9355
9356 out:
9357 return ret;
9358 }
9359
9360 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9361 {
9362 bool vcc_off = false;
9363
9364 /*
9365 * It seems some UFS devices may keep drawing more than sleep current
9366 * (at least for 500us) from UFS rails (especially from VCCQ rail).
9367 * To avoid this situation, add 2ms delay before putting these UFS
9368 * rails in LPM mode.
9369 */
9370 if (!ufshcd_is_link_active(hba) &&
9371 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
9372 usleep_range(2000, 2100);
9373
9374 /*
9375 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
9376 * some power.
9377 *
9378 * If the UFS device and link are in the OFF state, all power supplies
9379 * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is
9380 * not required. If the UFS link is inactive (Hibern8 or OFF state) and
9381 * the device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
9382 *
9383 * Ignore the error returned by ufshcd_toggle_vreg() as the device is
9384 * in a low power state anyway, which saves some power.
9385 *
9386 * If Write Booster is enabled and the device needs to flush the WB
9387 * buffer OR if bkops status is urgent for WB, keep Vcc on.
9388 */
9389 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9390 !hba->dev_info.is_lu_power_on_wp) {
9391 ufshcd_setup_vreg(hba, false);
9392 vcc_off = true;
9393 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9394 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9395 vcc_off = true;
9396 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
9397 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9398 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9399 }
9400 }
9401
9402 /*
9403 * Some UFS devices require a delay after the VCC power rail is turned off.
9404 */ 9405 if (vcc_off && hba->vreg_info.vcc && 9406 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) 9407 usleep_range(5000, 5100); 9408 } 9409 9410 #ifdef CONFIG_PM 9411 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) 9412 { 9413 int ret = 0; 9414 9415 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && 9416 !hba->dev_info.is_lu_power_on_wp) { 9417 ret = ufshcd_setup_vreg(hba, true); 9418 } else if (!ufshcd_is_ufs_dev_active(hba)) { 9419 if (!ufshcd_is_link_active(hba)) { 9420 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); 9421 if (ret) 9422 goto vcc_disable; 9423 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); 9424 if (ret) 9425 goto vccq_lpm; 9426 } 9427 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); 9428 } 9429 goto out; 9430 9431 vccq_lpm: 9432 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); 9433 vcc_disable: 9434 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); 9435 out: 9436 return ret; 9437 } 9438 #endif /* CONFIG_PM */ 9439 9440 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) 9441 { 9442 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) 9443 ufshcd_setup_hba_vreg(hba, false); 9444 } 9445 9446 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) 9447 { 9448 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) 9449 ufshcd_setup_hba_vreg(hba, true); 9450 } 9451 9452 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) 9453 { 9454 int ret = 0; 9455 bool check_for_bkops; 9456 enum ufs_pm_level pm_lvl; 9457 enum ufs_dev_pwr_mode req_dev_pwr_mode; 9458 enum uic_link_state req_link_state; 9459 9460 hba->pm_op_in_progress = true; 9461 if (pm_op != UFS_SHUTDOWN_PM) { 9462 pm_lvl = pm_op == UFS_RUNTIME_PM ? 9463 hba->rpm_lvl : hba->spm_lvl; 9464 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); 9465 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); 9466 } else { 9467 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; 9468 req_link_state = UIC_LINK_OFF_STATE; 9469 } 9470 9471 /* 9472 * If we can't transition into any of the low power modes 9473 * just gate the clocks. 9474 */ 9475 ufshcd_hold(hba); 9476 hba->clk_gating.is_suspended = true; 9477 9478 if (ufshcd_is_clkscaling_supported(hba)) 9479 ufshcd_clk_scaling_suspend(hba, true); 9480 9481 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && 9482 req_link_state == UIC_LINK_ACTIVE_STATE) { 9483 goto vops_suspend; 9484 } 9485 9486 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && 9487 (req_link_state == hba->uic_link_state)) 9488 goto enable_scaling; 9489 9490 /* UFS device & link must be active before we enter in this function */ 9491 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { 9492 ret = -EINVAL; 9493 goto enable_scaling; 9494 } 9495 9496 if (pm_op == UFS_RUNTIME_PM) { 9497 if (ufshcd_can_autobkops_during_suspend(hba)) { 9498 /* 9499 * The device is idle with no requests in the queue, 9500 * allow background operations if bkops status shows 9501 * that performance might be impacted. 9502 */ 9503 ret = ufshcd_urgent_bkops(hba); 9504 if (ret) { 9505 /* 9506 * If return err in suspend flow, IO will hang. 9507 * Trigger error handler and break suspend for 9508 * error recovery. 
9509 */ 9510 ufshcd_force_error_recovery(hba); 9511 ret = -EBUSY; 9512 goto enable_scaling; 9513 } 9514 } else { 9515 /* make sure that auto bkops is disabled */ 9516 ufshcd_disable_auto_bkops(hba); 9517 } 9518 /* 9519 * If device needs to do BKOP or WB buffer flush during 9520 * Hibern8, keep device power mode as "active power mode" 9521 * and VCC supply. 9522 */ 9523 hba->dev_info.b_rpm_dev_flush_capable = 9524 hba->auto_bkops_enabled || 9525 (((req_link_state == UIC_LINK_HIBERN8_STATE) || 9526 ((req_link_state == UIC_LINK_ACTIVE_STATE) && 9527 ufshcd_is_auto_hibern8_enabled(hba))) && 9528 ufshcd_wb_need_flush(hba)); 9529 } 9530 9531 flush_work(&hba->eeh_work); 9532 9533 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 9534 if (ret) 9535 goto enable_scaling; 9536 9537 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { 9538 if (pm_op != UFS_RUNTIME_PM) 9539 /* ensure that bkops is disabled */ 9540 ufshcd_disable_auto_bkops(hba); 9541 9542 if (!hba->dev_info.b_rpm_dev_flush_capable) { 9543 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); 9544 if (ret && pm_op != UFS_SHUTDOWN_PM) { 9545 /* 9546 * If return err in suspend flow, IO will hang. 9547 * Trigger error handler and break suspend for 9548 * error recovery. 9549 */ 9550 ufshcd_force_error_recovery(hba); 9551 ret = -EBUSY; 9552 } 9553 if (ret) 9554 goto enable_scaling; 9555 } 9556 } 9557 9558 /* 9559 * In the case of DeepSleep, the device is expected to remain powered 9560 * with the link off, so do not check for bkops. 9561 */ 9562 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba); 9563 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops); 9564 if (ret && pm_op != UFS_SHUTDOWN_PM) { 9565 /* 9566 * If return err in suspend flow, IO will hang. 9567 * Trigger error handler and break suspend for 9568 * error recovery. 9569 */ 9570 ufshcd_force_error_recovery(hba); 9571 ret = -EBUSY; 9572 } 9573 if (ret) 9574 goto set_dev_active; 9575 9576 vops_suspend: 9577 /* 9578 * Call vendor specific suspend callback. As these callbacks may access 9579 * vendor specific host controller register space call them before the 9580 * host clocks are ON. 9581 */ 9582 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); 9583 if (ret) 9584 goto set_link_active; 9585 goto out; 9586 9587 set_link_active: 9588 /* 9589 * Device hardware reset is required to exit DeepSleep. Also, for 9590 * DeepSleep, the link is off so host reset and restore will be done 9591 * further below. 
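* (for DeepSleep this means the ufshcd_is_link_off() branch below, which
* calls ufshcd_host_reset_and_restore()).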
9592 */ 9593 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 9594 ufshcd_device_reset(hba); 9595 WARN_ON(!ufshcd_is_link_off(hba)); 9596 } 9597 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) 9598 ufshcd_set_link_active(hba); 9599 else if (ufshcd_is_link_off(hba)) 9600 ufshcd_host_reset_and_restore(hba); 9601 set_dev_active: 9602 /* Can also get here needing to exit DeepSleep */ 9603 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 9604 ufshcd_device_reset(hba); 9605 ufshcd_host_reset_and_restore(hba); 9606 } 9607 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) 9608 ufshcd_disable_auto_bkops(hba); 9609 enable_scaling: 9610 if (ufshcd_is_clkscaling_supported(hba)) 9611 ufshcd_clk_scaling_suspend(hba, false); 9612 9613 hba->dev_info.b_rpm_dev_flush_capable = false; 9614 out: 9615 if (hba->dev_info.b_rpm_dev_flush_capable) { 9616 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, 9617 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS)); 9618 } 9619 9620 if (ret) { 9621 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); 9622 hba->clk_gating.is_suspended = false; 9623 ufshcd_release(hba); 9624 } 9625 hba->pm_op_in_progress = false; 9626 return ret; 9627 } 9628 9629 #ifdef CONFIG_PM 9630 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) 9631 { 9632 int ret; 9633 enum uic_link_state old_link_state = hba->uic_link_state; 9634 9635 hba->pm_op_in_progress = true; 9636 9637 /* 9638 * Call vendor specific resume callback. As these callbacks may access 9639 * vendor specific host controller register space call them when the 9640 * host clocks are ON. 9641 */ 9642 ret = ufshcd_vops_resume(hba, pm_op); 9643 if (ret) 9644 goto out; 9645 9646 /* For DeepSleep, the only supported option is to have the link off */ 9647 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba)); 9648 9649 if (ufshcd_is_link_hibern8(hba)) { 9650 ret = ufshcd_uic_hibern8_exit(hba); 9651 if (!ret) { 9652 ufshcd_set_link_active(hba); 9653 } else { 9654 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 9655 __func__, ret); 9656 goto vendor_suspend; 9657 } 9658 } else if (ufshcd_is_link_off(hba)) { 9659 /* 9660 * A full initialization of the host and the device is 9661 * required since the link was put to off during suspend. 9662 * Note, in the case of DeepSleep, the device will exit 9663 * DeepSleep due to device reset. 9664 */ 9665 ret = ufshcd_reset_and_restore(hba); 9666 /* 9667 * ufshcd_reset_and_restore() should have already 9668 * set the link state as active 9669 */ 9670 if (ret || !ufshcd_is_link_active(hba)) 9671 goto vendor_suspend; 9672 } 9673 9674 if (!ufshcd_is_ufs_dev_active(hba)) { 9675 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); 9676 if (ret) 9677 goto set_old_link_state; 9678 } 9679 9680 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) 9681 ufshcd_enable_auto_bkops(hba); 9682 else 9683 /* 9684 * If BKOPs operations are urgently needed at this moment then 9685 * keep auto-bkops enabled or else disable it. 
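* ufshcd_urgent_bkops() queries the device's BKOPS status and only keeps
* auto-bkops enabled if that status is at or above the urgent level.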
9686 */ 9687 ufshcd_urgent_bkops(hba); 9688 9689 if (hba->ee_usr_mask) 9690 ufshcd_write_ee_control(hba); 9691 9692 if (ufshcd_is_clkscaling_supported(hba)) 9693 ufshcd_clk_scaling_suspend(hba, false); 9694 9695 if (hba->dev_info.b_rpm_dev_flush_capable) { 9696 hba->dev_info.b_rpm_dev_flush_capable = false; 9697 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); 9698 } 9699 9700 /* Enable Auto-Hibernate if configured */ 9701 ufshcd_auto_hibern8_enable(hba); 9702 9703 goto out; 9704 9705 set_old_link_state: 9706 ufshcd_link_state_transition(hba, old_link_state, 0); 9707 vendor_suspend: 9708 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 9709 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); 9710 out: 9711 if (ret) 9712 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); 9713 hba->clk_gating.is_suspended = false; 9714 ufshcd_release(hba); 9715 hba->pm_op_in_progress = false; 9716 return ret; 9717 } 9718 9719 static int ufshcd_wl_runtime_suspend(struct device *dev) 9720 { 9721 struct scsi_device *sdev = to_scsi_device(dev); 9722 struct ufs_hba *hba; 9723 int ret; 9724 ktime_t start = ktime_get(); 9725 9726 hba = shost_priv(sdev->host); 9727 9728 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM); 9729 if (ret) 9730 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9731 9732 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret, 9733 ktime_to_us(ktime_sub(ktime_get(), start)), 9734 hba->curr_dev_pwr_mode, hba->uic_link_state); 9735 9736 return ret; 9737 } 9738 9739 static int ufshcd_wl_runtime_resume(struct device *dev) 9740 { 9741 struct scsi_device *sdev = to_scsi_device(dev); 9742 struct ufs_hba *hba; 9743 int ret = 0; 9744 ktime_t start = ktime_get(); 9745 9746 hba = shost_priv(sdev->host); 9747 9748 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM); 9749 if (ret) 9750 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9751 9752 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret, 9753 ktime_to_us(ktime_sub(ktime_get(), start)), 9754 hba->curr_dev_pwr_mode, hba->uic_link_state); 9755 9756 return ret; 9757 } 9758 #endif 9759 9760 #ifdef CONFIG_PM_SLEEP 9761 static int ufshcd_wl_suspend(struct device *dev) 9762 { 9763 struct scsi_device *sdev = to_scsi_device(dev); 9764 struct ufs_hba *hba; 9765 int ret = 0; 9766 ktime_t start = ktime_get(); 9767 9768 hba = shost_priv(sdev->host); 9769 down(&hba->host_sem); 9770 hba->system_suspending = true; 9771 9772 if (pm_runtime_suspended(dev)) 9773 goto out; 9774 9775 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM); 9776 if (ret) { 9777 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9778 up(&hba->host_sem); 9779 } 9780 9781 out: 9782 if (!ret) 9783 hba->is_sys_suspended = true; 9784 trace_ufshcd_wl_suspend(dev_name(dev), ret, 9785 ktime_to_us(ktime_sub(ktime_get(), start)), 9786 hba->curr_dev_pwr_mode, hba->uic_link_state); 9787 9788 return ret; 9789 } 9790 9791 static int ufshcd_wl_resume(struct device *dev) 9792 { 9793 struct scsi_device *sdev = to_scsi_device(dev); 9794 struct ufs_hba *hba; 9795 int ret = 0; 9796 ktime_t start = ktime_get(); 9797 9798 hba = shost_priv(sdev->host); 9799 9800 if (pm_runtime_suspended(dev)) 9801 goto out; 9802 9803 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM); 9804 if (ret) 9805 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9806 out: 9807 trace_ufshcd_wl_resume(dev_name(dev), ret, 9808 ktime_to_us(ktime_sub(ktime_get(), start)), 9809 hba->curr_dev_pwr_mode, hba->uic_link_state); 9810 if (!ret) 9811 hba->is_sys_suspended = false; 9812 hba->system_suspending = false; 9813 
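/* Release host_sem taken in ufshcd_wl_suspend(). */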
up(&hba->host_sem);
9814 return ret;
9815 }
9816 #endif
9817
9818 /**
9819 * ufshcd_suspend - helper function for suspend operations
9820 * @hba: per adapter instance
9821 *
9822 * This function disables irqs, turns off clocks and puts the vreg and
9823 * hba-vreg in LPM mode.
9824 *
9825 * Return: 0 upon success; < 0 upon failure.
9826 */
9827 static int ufshcd_suspend(struct ufs_hba *hba)
9828 {
9829 int ret;
9830
9831 if (!hba->is_powered)
9832 return 0;
9833 /*
9834 * Disable the host irq as there won't be any host controller
9835 * transaction expected till resume.
9836 */
9837 ufshcd_disable_irq(hba);
9838 ret = ufshcd_setup_clocks(hba, false);
9839 if (ret) {
9840 ufshcd_enable_irq(hba);
9841 return ret;
9842 }
9843 if (ufshcd_is_clkgating_allowed(hba)) {
9844 hba->clk_gating.state = CLKS_OFF;
9845 trace_ufshcd_clk_gating(dev_name(hba->dev),
9846 hba->clk_gating.state);
9847 }
9848
9849 ufshcd_vreg_set_lpm(hba);
9850 /* Put the host controller in low power mode if possible */
9851 ufshcd_hba_vreg_set_lpm(hba);
9852 return ret;
9853 }
9854
9855 #ifdef CONFIG_PM
9856 /**
9857 * ufshcd_resume - helper function for resume operations
9858 * @hba: per adapter instance
9859 *
9860 * This function turns on the regulators, clocks and
9861 * irqs of the hba.
9862 *
9863 * Return: 0 for success and non-zero for failure.
9864 */
9865 static int ufshcd_resume(struct ufs_hba *hba)
9866 {
9867 int ret;
9868
9869 if (!hba->is_powered)
9870 return 0;
9871
9872 ufshcd_hba_vreg_set_hpm(hba);
9873 ret = ufshcd_vreg_set_hpm(hba);
9874 if (ret)
9875 goto out;
9876
9877 /* Make sure clocks are enabled before accessing controller */
9878 ret = ufshcd_setup_clocks(hba, true);
9879 if (ret)
9880 goto disable_vreg;
9881
9882 /* enable the host irq as host controller would be active soon */
9883 ufshcd_enable_irq(hba);
9884
9885 goto out;
9886
9887 disable_vreg:
9888 ufshcd_vreg_set_lpm(hba);
9889 out:
9890 if (ret)
9891 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
9892 return ret;
9893 }
9894 #endif /* CONFIG_PM */
9895
9896 #ifdef CONFIG_PM_SLEEP
9897 /**
9898 * ufshcd_system_suspend - system suspend callback
9899 * @dev: Device associated with the UFS controller.
9900 *
9901 * Executed before putting the system into a sleep state in which the contents
9902 * of main memory are preserved.
9903 *
9904 * Return: 0 for success and non-zero for failure.
9905 */
9906 int ufshcd_system_suspend(struct device *dev)
9907 {
9908 struct ufs_hba *hba = dev_get_drvdata(dev);
9909 int ret = 0;
9910 ktime_t start = ktime_get();
9911
9912 if (pm_runtime_suspended(hba->dev))
9913 goto out;
9914
9915 ret = ufshcd_suspend(hba);
9916 out:
9917 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9918 ktime_to_us(ktime_sub(ktime_get(), start)),
9919 hba->curr_dev_pwr_mode, hba->uic_link_state);
9920 return ret;
9921 }
9922 EXPORT_SYMBOL(ufshcd_system_suspend);
9923
9924 /**
9925 * ufshcd_system_resume - system resume callback
9926 * @dev: Device associated with the UFS controller.
9927 *
9928 * Executed after waking the system up from a sleep state in which the contents
9929 * of main memory were preserved.
9930 *
9931 * Return: 0 for success and non-zero for failure.
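*
* Like ufshcd_system_suspend(), this is exported so that UFS glue drivers can
* wire it into their dev_pm_ops. A minimal sketch (illustrative only, the
* struct name is made up):
*
*	static const struct dev_pm_ops my_ufs_pm_ops = {
*		SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
*		SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
*	};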
9932 */ 9933 int ufshcd_system_resume(struct device *dev) 9934 { 9935 struct ufs_hba *hba = dev_get_drvdata(dev); 9936 ktime_t start = ktime_get(); 9937 int ret = 0; 9938 9939 if (pm_runtime_suspended(hba->dev)) 9940 goto out; 9941 9942 ret = ufshcd_resume(hba); 9943 9944 out: 9945 trace_ufshcd_system_resume(dev_name(hba->dev), ret, 9946 ktime_to_us(ktime_sub(ktime_get(), start)), 9947 hba->curr_dev_pwr_mode, hba->uic_link_state); 9948 9949 return ret; 9950 } 9951 EXPORT_SYMBOL(ufshcd_system_resume); 9952 #endif /* CONFIG_PM_SLEEP */ 9953 9954 #ifdef CONFIG_PM 9955 /** 9956 * ufshcd_runtime_suspend - runtime suspend callback 9957 * @dev: Device associated with the UFS controller. 9958 * 9959 * Check the description of ufshcd_suspend() function for more details. 9960 * 9961 * Return: 0 for success and non-zero for failure. 9962 */ 9963 int ufshcd_runtime_suspend(struct device *dev) 9964 { 9965 struct ufs_hba *hba = dev_get_drvdata(dev); 9966 int ret; 9967 ktime_t start = ktime_get(); 9968 9969 ret = ufshcd_suspend(hba); 9970 9971 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, 9972 ktime_to_us(ktime_sub(ktime_get(), start)), 9973 hba->curr_dev_pwr_mode, hba->uic_link_state); 9974 return ret; 9975 } 9976 EXPORT_SYMBOL(ufshcd_runtime_suspend); 9977 9978 /** 9979 * ufshcd_runtime_resume - runtime resume routine 9980 * @dev: Device associated with the UFS controller. 9981 * 9982 * This function basically brings controller 9983 * to active state. Following operations are done in this function: 9984 * 9985 * 1. Turn on all the controller related clocks 9986 * 2. Turn ON VCC rail 9987 * 9988 * Return: 0 upon success; < 0 upon failure. 9989 */ 9990 int ufshcd_runtime_resume(struct device *dev) 9991 { 9992 struct ufs_hba *hba = dev_get_drvdata(dev); 9993 int ret; 9994 ktime_t start = ktime_get(); 9995 9996 ret = ufshcd_resume(hba); 9997 9998 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, 9999 ktime_to_us(ktime_sub(ktime_get(), start)), 10000 hba->curr_dev_pwr_mode, hba->uic_link_state); 10001 return ret; 10002 } 10003 EXPORT_SYMBOL(ufshcd_runtime_resume); 10004 #endif /* CONFIG_PM */ 10005 10006 static void ufshcd_wl_shutdown(struct device *dev) 10007 { 10008 struct scsi_device *sdev = to_scsi_device(dev); 10009 struct ufs_hba *hba = shost_priv(sdev->host); 10010 10011 down(&hba->host_sem); 10012 hba->shutting_down = true; 10013 up(&hba->host_sem); 10014 10015 /* Turn on everything while shutting down */ 10016 ufshcd_rpm_get_sync(hba); 10017 scsi_device_quiesce(sdev); 10018 shost_for_each_device(sdev, hba->host) { 10019 if (sdev == hba->ufs_device_wlun) 10020 continue; 10021 scsi_device_quiesce(sdev); 10022 } 10023 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); 10024 10025 /* 10026 * Next, turn off the UFS controller and the UFS regulators. Disable 10027 * clocks. 
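* Only do this if __ufshcd_wl_suspend() above actually powered the device
* off and took the link down.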
10028 */ 10029 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) 10030 ufshcd_suspend(hba); 10031 10032 hba->is_powered = false; 10033 } 10034 10035 /** 10036 * ufshcd_remove - de-allocate SCSI host and host memory space 10037 * data structure memory 10038 * @hba: per adapter instance 10039 */ 10040 void ufshcd_remove(struct ufs_hba *hba) 10041 { 10042 if (hba->ufs_device_wlun) 10043 ufshcd_rpm_get_sync(hba); 10044 ufs_hwmon_remove(hba); 10045 ufs_bsg_remove(hba); 10046 ufs_sysfs_remove_nodes(hba->dev); 10047 blk_mq_destroy_queue(hba->tmf_queue); 10048 blk_put_queue(hba->tmf_queue); 10049 blk_mq_free_tag_set(&hba->tmf_tag_set); 10050 scsi_remove_host(hba->host); 10051 /* disable interrupts */ 10052 ufshcd_disable_intr(hba, hba->intr_mask); 10053 ufshcd_hba_stop(hba); 10054 ufshcd_hba_exit(hba); 10055 } 10056 EXPORT_SYMBOL_GPL(ufshcd_remove); 10057 10058 #ifdef CONFIG_PM_SLEEP 10059 int ufshcd_system_freeze(struct device *dev) 10060 { 10061 10062 return ufshcd_system_suspend(dev); 10063 10064 } 10065 EXPORT_SYMBOL_GPL(ufshcd_system_freeze); 10066 10067 int ufshcd_system_restore(struct device *dev) 10068 { 10069 10070 struct ufs_hba *hba = dev_get_drvdata(dev); 10071 int ret; 10072 10073 ret = ufshcd_system_resume(dev); 10074 if (ret) 10075 return ret; 10076 10077 /* Configure UTRL and UTMRL base address registers */ 10078 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 10079 REG_UTP_TRANSFER_REQ_LIST_BASE_L); 10080 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), 10081 REG_UTP_TRANSFER_REQ_LIST_BASE_H); 10082 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), 10083 REG_UTP_TASK_REQ_LIST_BASE_L); 10084 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), 10085 REG_UTP_TASK_REQ_LIST_BASE_H); 10086 /* 10087 * Make sure that UTRL and UTMRL base address registers 10088 * are updated with the latest queue addresses. Only after 10089 * updating these addresses, we can queue the new commands. 10090 */ 10091 mb(); 10092 10093 /* Resuming from hibernate, assume that link was OFF */ 10094 ufshcd_set_link_off(hba); 10095 10096 return 0; 10097 10098 } 10099 EXPORT_SYMBOL_GPL(ufshcd_system_restore); 10100 10101 int ufshcd_system_thaw(struct device *dev) 10102 { 10103 return ufshcd_system_resume(dev); 10104 } 10105 EXPORT_SYMBOL_GPL(ufshcd_system_thaw); 10106 #endif /* CONFIG_PM_SLEEP */ 10107 10108 /** 10109 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA) 10110 * @hba: pointer to Host Bus Adapter (HBA) 10111 */ 10112 void ufshcd_dealloc_host(struct ufs_hba *hba) 10113 { 10114 scsi_host_put(hba->host); 10115 } 10116 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); 10117 10118 /** 10119 * ufshcd_set_dma_mask - Set dma mask based on the controller 10120 * addressing capability 10121 * @hba: per adapter instance 10122 * 10123 * Return: 0 for success, non-zero for failure. 10124 */ 10125 static int ufshcd_set_dma_mask(struct ufs_hba *hba) 10126 { 10127 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { 10128 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) 10129 return 0; 10130 } 10131 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); 10132 } 10133 10134 /** 10135 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) 10136 * @dev: pointer to device handle 10137 * @hba_handle: driver private handle 10138 * 10139 * Return: 0 on success, non-zero value on failure. 
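*
* A host glue driver typically pairs this with ufshcd_init(); a minimal
* sketch (illustrative only, error handling trimmed):
*
*	err = ufshcd_alloc_host(dev, &hba);
*	if (err)
*		return err;
*	err = ufshcd_init(hba, mmio_base, irq);
*	if (err)
*		ufshcd_dealloc_host(hba);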
10140 */ 10141 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) 10142 { 10143 struct Scsi_Host *host; 10144 struct ufs_hba *hba; 10145 int err = 0; 10146 10147 if (!dev) { 10148 dev_err(dev, 10149 "Invalid memory reference for dev is NULL\n"); 10150 err = -ENODEV; 10151 goto out_error; 10152 } 10153 10154 host = scsi_host_alloc(&ufshcd_driver_template, 10155 sizeof(struct ufs_hba)); 10156 if (!host) { 10157 dev_err(dev, "scsi_host_alloc failed\n"); 10158 err = -ENOMEM; 10159 goto out_error; 10160 } 10161 host->nr_maps = HCTX_TYPE_POLL + 1; 10162 hba = shost_priv(host); 10163 hba->host = host; 10164 hba->dev = dev; 10165 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; 10166 hba->nop_out_timeout = NOP_OUT_TIMEOUT; 10167 ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry)); 10168 INIT_LIST_HEAD(&hba->clk_list_head); 10169 spin_lock_init(&hba->outstanding_lock); 10170 10171 *hba_handle = hba; 10172 10173 out_error: 10174 return err; 10175 } 10176 EXPORT_SYMBOL(ufshcd_alloc_host); 10177 10178 /* This function exists because blk_mq_alloc_tag_set() requires this. */ 10179 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx, 10180 const struct blk_mq_queue_data *qd) 10181 { 10182 WARN_ON_ONCE(true); 10183 return BLK_STS_NOTSUPP; 10184 } 10185 10186 static const struct blk_mq_ops ufshcd_tmf_ops = { 10187 .queue_rq = ufshcd_queue_tmf, 10188 }; 10189 10190 /** 10191 * ufshcd_init - Driver initialization routine 10192 * @hba: per-adapter instance 10193 * @mmio_base: base register address 10194 * @irq: Interrupt line of device 10195 * 10196 * Return: 0 on success, non-zero value on failure. 10197 */ 10198 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) 10199 { 10200 int err; 10201 struct Scsi_Host *host = hba->host; 10202 struct device *dev = hba->dev; 10203 char eh_wq_name[sizeof("ufs_eh_wq_00")]; 10204 10205 /* 10206 * dev_set_drvdata() must be called before any callbacks are registered 10207 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, 10208 * sysfs). 
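* It is therefore done first, before ufshcd_hba_init() and the rest of the
* initialization below.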
10209 */ 10210 dev_set_drvdata(dev, hba); 10211 10212 if (!mmio_base) { 10213 dev_err(hba->dev, 10214 "Invalid memory reference for mmio_base is NULL\n"); 10215 err = -ENODEV; 10216 goto out_error; 10217 } 10218 10219 hba->mmio_base = mmio_base; 10220 hba->irq = irq; 10221 hba->vps = &ufs_hba_vps; 10222 10223 err = ufshcd_hba_init(hba); 10224 if (err) 10225 goto out_error; 10226 10227 /* Read capabilities registers */ 10228 err = ufshcd_hba_capabilities(hba); 10229 if (err) 10230 goto out_disable; 10231 10232 /* Get UFS version supported by the controller */ 10233 hba->ufs_version = ufshcd_get_ufs_version(hba); 10234 10235 /* Get Interrupt bit mask per version */ 10236 hba->intr_mask = ufshcd_get_intr_mask(hba); 10237 10238 err = ufshcd_set_dma_mask(hba); 10239 if (err) { 10240 dev_err(hba->dev, "set dma mask failed\n"); 10241 goto out_disable; 10242 } 10243 10244 /* Allocate memory for host memory space */ 10245 err = ufshcd_memory_alloc(hba); 10246 if (err) { 10247 dev_err(hba->dev, "Memory allocation failed\n"); 10248 goto out_disable; 10249 } 10250 10251 /* Configure LRB */ 10252 ufshcd_host_memory_configure(hba); 10253 10254 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; 10255 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; 10256 host->max_id = UFSHCD_MAX_ID; 10257 host->max_lun = UFS_MAX_LUNS; 10258 host->max_channel = UFSHCD_MAX_CHANNEL; 10259 host->unique_id = host->host_no; 10260 host->max_cmd_len = UFS_CDB_SIZE; 10261 host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING); 10262 10263 hba->max_pwr_info.is_valid = false; 10264 10265 /* Initialize work queues */ 10266 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", 10267 hba->host->host_no); 10268 hba->eh_wq = create_singlethread_workqueue(eh_wq_name); 10269 if (!hba->eh_wq) { 10270 dev_err(hba->dev, "%s: failed to create eh workqueue\n", 10271 __func__); 10272 err = -ENOMEM; 10273 goto out_disable; 10274 } 10275 INIT_WORK(&hba->eh_work, ufshcd_err_handler); 10276 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); 10277 10278 sema_init(&hba->host_sem, 1); 10279 10280 /* Initialize UIC command mutex */ 10281 mutex_init(&hba->uic_cmd_mutex); 10282 10283 /* Initialize mutex for device management commands */ 10284 mutex_init(&hba->dev_cmd.lock); 10285 10286 /* Initialize mutex for exception event control */ 10287 mutex_init(&hba->ee_ctrl_mutex); 10288 10289 mutex_init(&hba->wb_mutex); 10290 init_rwsem(&hba->clk_scaling_lock); 10291 10292 ufshcd_init_clk_gating(hba); 10293 10294 ufshcd_init_clk_scaling(hba); 10295 10296 /* 10297 * In order to avoid any spurious interrupt immediately after 10298 * registering UFS controller interrupt handler, clear any pending UFS 10299 * interrupt status and disable all the UFS interrupts. 10300 */ 10301 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), 10302 REG_INTERRUPT_STATUS); 10303 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); 10304 /* 10305 * Make sure that UFS interrupts are disabled and any pending interrupt 10306 * status is cleared before registering UFS interrupt handler. 
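* The mb() below orders the two register writes above against the
* devm_request_irq() call that follows.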
10307 */
10308 mb();
10309
10310 /* IRQ registration */
10311 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10312 if (err) {
10313 dev_err(hba->dev, "request irq failed\n");
10314 goto out_disable;
10315 } else {
10316 hba->is_irq_enabled = true;
10317 }
10318
10319 if (!is_mcq_supported(hba)) {
10320 err = scsi_add_host(host, hba->dev);
10321 if (err) {
10322 dev_err(hba->dev, "scsi_add_host failed\n");
10323 goto out_disable;
10324 }
10325 }
10326
10327 hba->tmf_tag_set = (struct blk_mq_tag_set) {
10328 .nr_hw_queues = 1,
10329 .queue_depth = hba->nutmrs,
10330 .ops = &ufshcd_tmf_ops,
10331 .flags = BLK_MQ_F_NO_SCHED,
10332 };
10333 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
10334 if (err < 0)
10335 goto out_remove_scsi_host;
10336 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
10337 if (IS_ERR(hba->tmf_queue)) {
10338 err = PTR_ERR(hba->tmf_queue);
10339 goto free_tmf_tag_set;
10340 }
10341 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
10342 sizeof(*hba->tmf_rqs), GFP_KERNEL);
10343 if (!hba->tmf_rqs) {
10344 err = -ENOMEM;
10345 goto free_tmf_queue;
10346 }
10347
10348 /* Reset the attached device */
10349 ufshcd_device_reset(hba);
10350
10351 ufshcd_init_crypto(hba);
10352
10353 /* Host controller enable */
10354 err = ufshcd_hba_enable(hba);
10355 if (err) {
10356 dev_err(hba->dev, "Host controller enable failed\n");
10357 ufshcd_print_evt_hist(hba);
10358 ufshcd_print_host_state(hba);
10359 goto free_tmf_queue;
10360 }
10361
10362 /*
10363 * Set the default power management level for runtime and system PM.
10364 * Default power saving mode is to keep UFS link in Hibern8 state
10365 * and UFS device in sleep state.
10366 */
10367 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10368 UFS_SLEEP_PWR_MODE,
10369 UIC_LINK_HIBERN8_STATE);
10370 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10371 UFS_SLEEP_PWR_MODE,
10372 UIC_LINK_HIBERN8_STATE);
10373
10374 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
10375 ufshcd_rpm_dev_flush_recheck_work);
10376
10377 /* Set the default auto-hibernate idle timer value to 150 ms */
10378 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
10379 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
10380 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
10381 }
10382
10383 /* Hold auto suspend until async scan completes */
10384 pm_runtime_get_sync(dev);
10385 atomic_set(&hba->scsi_block_reqs_cnt, 0);
10386 /*
10387 * We assume the device was not left in sleep/power-down state by the
10388 * boot stage that ran before the kernel.
10389 * This assumption helps avoid doing link startup twice during
10390 * ufshcd_probe_hba().
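* ufshcd_set_ufs_dev_active() below records that assumption; the actual link
* startup and device initialization happen in ufshcd_async_scan().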
10391 */ 10392 ufshcd_set_ufs_dev_active(hba); 10393 10394 async_schedule(ufshcd_async_scan, hba); 10395 ufs_sysfs_add_nodes(hba->dev); 10396 10397 device_enable_async_suspend(dev); 10398 return 0; 10399 10400 free_tmf_queue: 10401 blk_mq_destroy_queue(hba->tmf_queue); 10402 blk_put_queue(hba->tmf_queue); 10403 free_tmf_tag_set: 10404 blk_mq_free_tag_set(&hba->tmf_tag_set); 10405 out_remove_scsi_host: 10406 scsi_remove_host(hba->host); 10407 out_disable: 10408 hba->is_irq_enabled = false; 10409 ufshcd_hba_exit(hba); 10410 out_error: 10411 return err; 10412 } 10413 EXPORT_SYMBOL_GPL(ufshcd_init); 10414 10415 void ufshcd_resume_complete(struct device *dev) 10416 { 10417 struct ufs_hba *hba = dev_get_drvdata(dev); 10418 10419 if (hba->complete_put) { 10420 ufshcd_rpm_put(hba); 10421 hba->complete_put = false; 10422 } 10423 } 10424 EXPORT_SYMBOL_GPL(ufshcd_resume_complete); 10425 10426 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba) 10427 { 10428 struct device *dev = &hba->ufs_device_wlun->sdev_gendev; 10429 enum ufs_dev_pwr_mode dev_pwr_mode; 10430 enum uic_link_state link_state; 10431 unsigned long flags; 10432 bool res; 10433 10434 spin_lock_irqsave(&dev->power.lock, flags); 10435 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl); 10436 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl); 10437 res = pm_runtime_suspended(dev) && 10438 hba->curr_dev_pwr_mode == dev_pwr_mode && 10439 hba->uic_link_state == link_state && 10440 !hba->dev_info.b_rpm_dev_flush_capable; 10441 spin_unlock_irqrestore(&dev->power.lock, flags); 10442 10443 return res; 10444 } 10445 10446 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm) 10447 { 10448 struct ufs_hba *hba = dev_get_drvdata(dev); 10449 int ret; 10450 10451 /* 10452 * SCSI assumes that runtime-pm and system-pm for scsi drivers 10453 * are same. And it doesn't wake up the device for system-suspend 10454 * if it's runtime suspended. But ufs doesn't follow that. 10455 * Refer ufshcd_resume_complete() 10456 */ 10457 if (hba->ufs_device_wlun) { 10458 /* Prevent runtime suspend */ 10459 ufshcd_rpm_get_noresume(hba); 10460 /* 10461 * Check if already runtime suspended in same state as system 10462 * suspend would be. 
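* If it is, the runtime resume below is skipped and the WLUN stays in its
* current low power state.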
10463 */ 10464 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) { 10465 /* RPM state is not ok for SPM, so runtime resume */ 10466 ret = ufshcd_rpm_resume(hba); 10467 if (ret < 0 && ret != -EACCES) { 10468 ufshcd_rpm_put(hba); 10469 return ret; 10470 } 10471 } 10472 hba->complete_put = true; 10473 } 10474 return 0; 10475 } 10476 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare); 10477 10478 int ufshcd_suspend_prepare(struct device *dev) 10479 { 10480 return __ufshcd_suspend_prepare(dev, true); 10481 } 10482 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare); 10483 10484 #ifdef CONFIG_PM_SLEEP 10485 static int ufshcd_wl_poweroff(struct device *dev) 10486 { 10487 struct scsi_device *sdev = to_scsi_device(dev); 10488 struct ufs_hba *hba = shost_priv(sdev->host); 10489 10490 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); 10491 return 0; 10492 } 10493 #endif 10494 10495 static int ufshcd_wl_probe(struct device *dev) 10496 { 10497 struct scsi_device *sdev = to_scsi_device(dev); 10498 10499 if (!is_device_wlun(sdev)) 10500 return -ENODEV; 10501 10502 blk_pm_runtime_init(sdev->request_queue, dev); 10503 pm_runtime_set_autosuspend_delay(dev, 0); 10504 pm_runtime_allow(dev); 10505 10506 return 0; 10507 } 10508 10509 static int ufshcd_wl_remove(struct device *dev) 10510 { 10511 pm_runtime_forbid(dev); 10512 return 0; 10513 } 10514 10515 static const struct dev_pm_ops ufshcd_wl_pm_ops = { 10516 #ifdef CONFIG_PM_SLEEP 10517 .suspend = ufshcd_wl_suspend, 10518 .resume = ufshcd_wl_resume, 10519 .freeze = ufshcd_wl_suspend, 10520 .thaw = ufshcd_wl_resume, 10521 .poweroff = ufshcd_wl_poweroff, 10522 .restore = ufshcd_wl_resume, 10523 #endif 10524 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL) 10525 }; 10526 10527 static void ufshcd_check_header_layout(void) 10528 { 10529 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10530 .cci = 3})[0] != 3); 10531 10532 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10533 .ehs_length = 2})[1] != 2); 10534 10535 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10536 .enable_crypto = 1})[2] 10537 != 0x80); 10538 10539 BUILD_BUG_ON((((u8 *)&(struct request_desc_header){ 10540 .command_type = 5, 10541 .data_direction = 3, 10542 .interrupt = 1, 10543 })[3]) != ((5 << 4) | (3 << 1) | 1)); 10544 10545 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){ 10546 .dunl = cpu_to_le32(0xdeadbeef)})[1] != 10547 cpu_to_le32(0xdeadbeef)); 10548 10549 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10550 .ocs = 4})[8] != 4); 10551 10552 BUILD_BUG_ON(((u8 *)&(struct request_desc_header){ 10553 .cds = 5})[9] != 5); 10554 10555 BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){ 10556 .dunu = cpu_to_le32(0xbadcafe)})[3] != 10557 cpu_to_le32(0xbadcafe)); 10558 10559 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){ 10560 .iid = 0xf })[4] != 0xf0); 10561 10562 BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){ 10563 .command_set_type = 0xf })[4] != 0xf); 10564 } 10565 10566 /* 10567 * ufs_dev_wlun_template - describes ufs device wlun 10568 * ufs-device wlun - used to send pm commands 10569 * All luns are consumers of ufs-device wlun. 10570 * 10571 * Currently, no sd driver is present for wluns. 10572 * Hence the no specific pm operations are performed. 10573 * With ufs design, SSU should be sent to ufs-device wlun. 10574 * Hence register a scsi driver for ufs wluns only. 
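* The PM callbacks used for the wlun are the ufshcd_wl_pm_ops defined above.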
10575 */
10576 static struct scsi_driver ufs_dev_wlun_template = {
10577 .gendrv = {
10578 .name = "ufs_device_wlun",
10579 .owner = THIS_MODULE,
10580 .probe = ufshcd_wl_probe,
10581 .remove = ufshcd_wl_remove,
10582 .pm = &ufshcd_wl_pm_ops,
10583 .shutdown = ufshcd_wl_shutdown,
10584 },
10585 };
10586
10587 static int __init ufshcd_core_init(void)
10588 {
10589 int ret;
10590
10591 ufshcd_check_header_layout();
10592
10593 ufs_debugfs_init();
10594
10595 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
10596 if (ret)
10597 ufs_debugfs_exit();
10598 return ret;
10599 }
10600
10601 static void __exit ufshcd_core_exit(void)
10602 {
10603 ufs_debugfs_exit();
10604 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
10605 }
10606
10607 module_init(ufshcd_core_init);
10608 module_exit(ufshcd_core_exit);
10609
10610 MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
10611 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10612 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10613 MODULE_SOFTDEP("pre: governor_simpleondemand");
10614 MODULE_LICENSE("GPL");
10615