// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen asynchronously at any time. This function
 * only moves to a new state if the transition is allowed.
 *
 * Priority increases as we go down. For instance, from any state in L0, a
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From the DISABLE state we can only transition
 * to the POR state. Also, while in an L2 state, the user cannot jump back to
 * a previous L1 or L0 state.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 -> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT -> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS -> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static const struct mhi_pm_transitions dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};

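/*
 * Attempt an atomic PM state transition. Each MHI_PM_* state is a single
 * bit, so find_last_bit() on the current state yields the index of its
 * entry in dev_state_transitions[]. The requested state is accepted only
 * if it is present in that entry's to_states mask; otherwise the current
 * state is returned unchanged. Must be called with pm_lock held for
 * writing, and the caller must check the returned state.
 */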
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	if (state == MHI_STATE_RESET) {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_RESET_MASK, 1);
	} else {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_MHISTATE_MASK, state);
	}

	if (ret)
		dev_err(dev, "Failed to set MHI state to: %s\n",
			mhi_state_str(state));
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

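/*
 * Pulse the device wake doorbell (get followed by put) so that a device
 * sitting in M2 is nudged back to M0 before the host rings other doorbells.
 */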
static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	u32 timeout_ms;
	int ret, i;

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, interval_us,
				 mhi_cntrl->timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to clear MHI Reset\n");
		return ret;
	}

	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, 1, interval_us,
				 timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to enter MHI Ready\n");
		return ret;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

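		/*
		 * Point the write pointer at the last element so that every
		 * free element in the ring is made available to the device.
		 */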
		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

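/*
 * Handle the device M0 state transition: record the new state and re-ring
 * the event, command and channel doorbells that the device may have missed
 * while it was in a low power state.
 */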
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
		    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * to keep it in a low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	mhi_cntrl->ee = ee;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
			      mhi_destroy_device);
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		/* Skip MHI RESET if in RDDM state */
		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
			goto skip_mhi_reset;

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
		if (ret)
			dev_err(dev, "Device failed to clear MHI Reset\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);

		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
			/* wait for ready to be set */
			ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
						 MHISTATUS, MHISTATUS_READY_MASK,
						 1, 25000, mhi_cntrl->timeout_ms);
			if (ret)
				dev_err(dev, "Device failed to enter READY state\n");
		}
	}

skip_mhi_reset:
	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	enum dev_st_transition next_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify the MHI controller driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    &in_reset) ||
					!in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Transition to next state */
	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (cur_state != MHI_PM_POR) {
			dev_err(dev, "Error moving to state %s from %s\n",
				to_mhi_pm_state_str(MHI_PM_POR),
				to_mhi_pm_state_str(cur_state));
			goto exit_sys_error_transition;
		}
		next_state = DEV_ST_TRANSITION_PBL;
	} else {
		next_state = DEV_ST_TRANSITION_READY;
	}

	mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			if (mhi_cntrl->fbc_download)
				mhi_download_amss_image(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_FP:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_FP;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

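/*
 * mhi_pm_suspend - Move the device from M0/M1 into M3 (suspend).
 *
 * Fails with -EBUSY while any device wake references or pending packets are
 * outstanding. A minimal sketch of a typical caller, assuming a controller
 * driver that quiesces the physical link afterwards (names below are
 * illustrative only, not part of this file):
 *
 *	static int example_bus_suspend(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = mhi_pm_suspend(mhi_cntrl);
 *		if (ret)
 *			return ret;
 *
 *		return example_bus_quiesce_link(dev);
 *	}
 */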
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Could not enter M0/M1 state\n");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_dbg(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_dbg(dev, "Waiting for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

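/*
 * Common resume path for mhi_pm_resume() and mhi_pm_resume_force(). With
 * @force set, a device that is no longer in M3 is only warned about instead
 * of failing the resume with -EINVAL.
 */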
static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
		dev_warn(dev, "Resuming from non M3 state (%s)\n",
			 mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
		if (!force)
			return -EINVAL;
	}

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, false);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, true);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume_force);

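/*
 * Grab a device wake reference, trigger a resume if the device is suspended,
 * and wait until the PM state reaches M0. The reference is dropped again on
 * failure, so callers only own a wake vote when 0 is returned.
 */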
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

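/*
 * mhi_async_power_up - Start powering up the device without waiting for it
 * to reach mission mode. A controller driver typically registers the
 * controller first and then powers it up, e.g. (illustrative sketch only):
 *
 *	ret = mhi_register_controller(mhi_cntrl, config);
 *	if (!ret)
 *		ret = mhi_async_power_up(mhi_cntrl);
 *
 * Use mhi_sync_power_up() instead to block until mission mode is reached.
 */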
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret, i;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	/* Setup BHI INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
		dev_err(dev, "%s is not a valid EE for power on\n",
			TO_MHI_EXEC_STR(current_ee));
		ret = -EIO;
		goto error_exit;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
		TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));

	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, interval_us,
				 mhi_cntrl->timeout_ms);
		if (ret) {
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_exit;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* IRQs have been requested during probe, so we just need to enable them. */
	enable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		enable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_exit:
	mhi_cntrl->pm_state = MHI_PM_DISABLE;
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

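/*
 * mhi_power_down - Power down the device and tear down the MHI devices
 * created for its channels. With @graceful set, the device is shut down via
 * SHUTDOWN_PROCESS; otherwise the state machine is forced straight to
 * LD_ERR_FATAL_DETECT, e.g. when the link is already dead.
 */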
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_cntrl->pm_state;
	if (cur_state == MHI_PM_DISABLE) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return; /* Already powered down */
	}

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* Mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	disable_irq(mhi_cntrl->irq[0]);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

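/*
 * Synchronous power up: mhi_async_power_up() plus a wait for the device to
 * reach mission mode, powering it back down on timeout.
 */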
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);
	u32 timeout_ms;

	if (ret)
		return ret;

	/* Some devices need more time to become ready during power up */
	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

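/*
 * mhi_device_get - Hold a device wake vote on behalf of a client driver,
 * resuming the controller first if it is suspended. Must be balanced with
 * mhi_device_put(). A minimal sketch from a hypothetical client (not part
 * of this file):
 *
 *	mhi_device_get(mhi_dev);
 *	... issue transfers that must not race with suspend ...
 *	mhi_device_put(mhi_dev);
 *
 * Use mhi_device_get_sync() when the caller also needs to block until the
 * device is actually back in M0.
 */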
void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);