// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
#include "trace.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
 *     SYS_ERR_PROCESS -> SYS_ERR_FAIL
 *     SYS_ERR_FAIL -> SYS_ERR_DETECT
 *     SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static const struct mhi_pm_transitions dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_FAIL,
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};

enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >=
		     ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	trace_mhi_tryset_pm_state(mhi_cntrl, state);
	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	if (state == MHI_STATE_RESET) {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_RESET_MASK, 1);
	} else {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_MHISTATE_MASK, state);
	}

	if (ret)
		dev_err(dev, "Failed to set MHI state to: %s\n",
			mhi_state_str(state));
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	u32 timeout_ms;
	int ret, i;

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, interval_us,
				 mhi_cntrl->timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to clear MHI Reset\n");
		return ret;
	}

	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, 1, interval_us,
				 timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to enter MHI Ready\n");
		return ret;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
		    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	mhi_cntrl->ee = ee;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
			      mhi_destroy_device);
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		/* Skip MHI RESET if in RDDM state */
		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
			goto skip_mhi_reset;

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
		if (ret)
			dev_err(dev, "Device failed to clear MHI Reset\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);

		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
			/* wait for ready to be set */
			ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
						 MHISTATUS, MHISTATUS_READY_MASK,
						 1, 25000, mhi_cntrl->timeout_ms);
			if (ret)
				dev_err(dev, "Device failed to enter READY state\n");
		}
	}

skip_mhi_reset:
	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

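	/* Re-acquire pm_mutex before resetting the command and event ring contexts */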
	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	enum dev_st_transition next_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
641 dev_err(dev, "Device failed to exit MHI Reset state\n"); 642 write_lock_irq(&mhi_cntrl->pm_lock); 643 cur_state = mhi_tryset_pm_state(mhi_cntrl, 644 MHI_PM_SYS_ERR_FAIL); 645 write_unlock_irq(&mhi_cntrl->pm_lock); 646 /* Shutdown may have occurred, otherwise cleanup now */ 647 if (cur_state != MHI_PM_SYS_ERR_FAIL) 648 goto exit_sys_error_transition; 649 } 650 651 /* 652 * Device will clear BHI_INTVEC as a part of RESET processing, 653 * hence re-program it 654 */ 655 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); 656 } 657 658 dev_dbg(dev, 659 "Waiting for all pending event ring processing to complete\n"); 660 mhi_event = mhi_cntrl->mhi_event; 661 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { 662 if (mhi_event->offload_ev) 663 continue; 664 tasklet_kill(&mhi_event->task); 665 } 666 667 /* Release lock and wait for all pending threads to complete */ 668 mutex_unlock(&mhi_cntrl->pm_mutex); 669 dev_dbg(dev, "Waiting for all pending threads to complete\n"); 670 wake_up_all(&mhi_cntrl->state_event); 671 672 dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); 673 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); 674 675 mutex_lock(&mhi_cntrl->pm_mutex); 676 677 WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); 678 WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); 679 680 /* Reset the ev rings and cmd rings */ 681 dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); 682 mhi_cmd = mhi_cntrl->mhi_cmd; 683 cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; 684 for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { 685 struct mhi_ring *ring = &mhi_cmd->ring; 686 687 ring->rp = ring->base; 688 ring->wp = ring->base; 689 cmd_ctxt->rp = cmd_ctxt->rbase; 690 cmd_ctxt->wp = cmd_ctxt->rbase; 691 } 692 693 mhi_event = mhi_cntrl->mhi_event; 694 er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; 695 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, 696 mhi_event++) { 697 struct mhi_ring *ring = &mhi_event->ring; 698 699 /* Skip offload events */ 700 if (mhi_event->offload_ev) 701 continue; 702 703 ring->rp = ring->base; 704 ring->wp = ring->base; 705 er_ctxt->rp = er_ctxt->rbase; 706 er_ctxt->wp = er_ctxt->rbase; 707 } 708 709 /* Transition to next state */ 710 if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { 711 write_lock_irq(&mhi_cntrl->pm_lock); 712 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); 713 write_unlock_irq(&mhi_cntrl->pm_lock); 714 if (cur_state != MHI_PM_POR) { 715 dev_err(dev, "Error moving to state %s from %s\n", 716 to_mhi_pm_state_str(MHI_PM_POR), 717 to_mhi_pm_state_str(cur_state)); 718 goto exit_sys_error_transition; 719 } 720 next_state = DEV_ST_TRANSITION_PBL; 721 } else { 722 next_state = DEV_ST_TRANSITION_READY; 723 } 724 725 mhi_queue_state_transition(mhi_cntrl, next_state); 726 727 exit_sys_error_transition: 728 dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", 729 to_mhi_pm_state_str(mhi_cntrl->pm_state), 730 mhi_state_str(mhi_cntrl->dev_state)); 731 732 mutex_unlock(&mhi_cntrl->pm_mutex); 733 } 734 735 /* Queue a new work item and schedule work */ 736 int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, 737 enum dev_st_transition state) 738 { 739 struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); 740 unsigned long flags; 741 742 if (!item) 743 return -ENOMEM; 744 745 item->state = state; 746 spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); 747 list_add_tail(&item->node, &mhi_cntrl->transition_list); 748 spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); 749 750 
	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);

		trace_mhi_pm_st_transition(mhi_cntrl, itr->state);

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			if (mhi_cntrl->fbc_download)
				mhi_download_amss_image(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_FP:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_FP;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Could not enter M0/M1 state");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_dbg(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_dbg(dev, "Waiting for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
		dev_warn(dev, "Resuming from non M3 state (%s)\n",
			 mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
		if (!force)
			return -EINVAL;
	}

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}

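/*
 * Resume wrappers around __mhi_pm_resume(): mhi_pm_resume() bails out if the
 * device does not report M3, while mhi_pm_resume_force() proceeds regardless.
 */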
int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, false);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, true);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume_force);

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret, i;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	/* Setup BHI INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
		dev_err(dev, "%s is not a valid EE for power on\n",
			TO_MHI_EXEC_STR(current_ee));
		ret = -EIO;
		goto error_exit;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
		TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));

	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, 0, interval_us,
					 mhi_cntrl->timeout_ms);
		if (ret) {
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_exit;
		}

		/*
		 * device clears INTVEC as part of RESET processing,
		 * re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* IRQs have been requested during probe, so we just need to enable them. */
	enable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		enable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_exit:
	mhi_cntrl->pm_state = MHI_PM_DISABLE;
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_cntrl->pm_state;
	if (cur_state == MHI_PM_DISABLE) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return; /* Already powered down */
	}

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ?
			   MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	disable_irq(mhi_cntrl->irq[0]);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);
	u32 timeout_ms;

	if (ret)
		return ret;

	/* Some devices need more time to set ready during power up */
	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);