// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_trace.h"

/* Check whether the given channel is the WWAN IP channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

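/* Send a device sleep message to CP and cache the requested sleep state. */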
static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

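/* Allocate a downlink buffer and prepare a TD for it, unless the pipe has
 * already reached its maximum number of queued entries.
 */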
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* limit max. nr of entries */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

/* This timer handler retries DL buffer allocation for pipes that have no free
 * buffers left and rings the doorbell if new TDs became available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

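/* Read the CP MUX capabilities from MMIO and derive the MUX configuration. */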
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (!ipc_imem->mmio->has_mux_lite) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = MUX_LITE;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
			       MUX_UL;

	/* The instance ID is the same as the channel ID because it is
	 * reused by the channel allocation function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
	cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES;

	return 0;
}

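/* Send the feature-set message to CP, using the tasklet variant of the
 * message sender when called from atomic context.
 */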
void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

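/* Start the TD update timer, or ring the doorbell directly while the timer is
 * suspended or the runtime phase has not been entered yet.
 */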
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->tdupdate_timer,
			      ipc_imem->hrtimer_period,
			      HRTIMER_MODE_REL);
	}
}

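/* Cancel an hrtimer if it is currently armed. */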
void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

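/* Pass the accumulated UL data of all active channels to the protocol layer.
 * Non-IP channels get an immediate HP doorbell; for the IP data channel the
 * pending HP update is reported to the caller.
 */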
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	bool forced_hpdu = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptor with the uplink buffer info. */
		hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);

		/* forced HP update needed for non data channels */
		if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
			forced_hpdu = true;
	}

	if (forced_hpdu) {
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

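/* Ask CP to enter the IPC init state and, once it is reached, configure the
 * MMIO space and request the transition to the running state.
 */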
void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
	do {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	} while (--timeout);

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) != IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;
		ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
				    IPC_CB(skb)->mapping,
				    IPC_CB(skb)->direction);
		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
						       skb);
		else if (port_id == ipc_imem->trace->chl_id)
			ipc_trace_port_rx(ipc_imem->trace, skb);
		else
			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
				     skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Seek for pipes with pending DL data. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* Try to allocate new empty DL skbs from head..tail - 1. */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;

	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If the Fast Update timer is already running then stop it. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get an immediate HP update.
	 * Start the Fast Update timer only for an IP channel if all the TDs
	 * were used in the last process.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}

/* Process an open uplink pipe. */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the pipe's head and tail indices. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in uplink direction - blocking
		 * write, resume it.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* Restart TX on the MUX if the UL pipe belongs to the IP channel. */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

/* Handle the ROM doorbell irq: latch the exit code and wake up the flash app
 * waiting on the devlink channel.
 */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
	complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* The link will go down; check for pending UL packets. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

/* Execute the startup timer and wait for a delayed start (e.g. NAND). */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		/* reduce period to 100 ms to check for mmio init state */
		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable timer */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward_now(&ipc_imem->startup_timer,
				    ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage. */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent. */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of devices can't be done from tasklet context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		return;
	}

	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);

	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ipc_imem->mux)
		ipc_imem->mux->wwan = ipc_imem->wwan;

	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_imem->trace = ipc_imem_trace_channel_init(ipc_imem);
	if (!ipc_imem->trace) {
		dev_err(ipc_imem->dev, "trace channel init failed");
		return;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();
}

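/* Tasklet-context irq handler: update the operation phase, process the
 * message ring and all open pipes, and rearm the timers as needed.
 */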
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int i;

	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      true);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux))
		ipc_imem_td_update_timer_start(ipc_imem);

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition
	 * from IPC_INIT to IPC_RUNNING in the PSI
	 * phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
						IPC_MEM_DEVICE_IPC_RUNNING) {
		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase. */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing the HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}

static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
				ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* Keep the current phase during the OFF_REQ deinit sequence; otherwise
	 * map the buffered CP execution stage to the AP phase.
	 */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

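/* Send the pipe-close message to CP and release the host-side pipe
 * resources.
 */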
void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

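/* Close both pipes of a channel and release the channel id. */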
void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

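/* Open the UL/DL pipes of a reserved channel and pre-allocate the downlink
 * buffers in tasklet context; returns the active channel or NULL on failure.
 */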
struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}

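/* Forward a host suspend event to the IPC protocol layer. */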
void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

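/* Reserve the channel with the given type and index; returns the channel
 * array index on success or a negative error code.
 */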
int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find channel of given type/index */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

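/* Initialize the next free channel slot with the given configuration. */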
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

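/* Apply the pipe configuration of chnl_cfg to an already allocated channel. */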
void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

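/* Clean up the pipes of all channels and mark the channels free. */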
static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force pipe to closed state also when not explicitly closed through
	 * ipc_imem_pipe_close()
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem while the link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* Set the modem to IPC_UNINIT only while the PCIe link is up; skip
	 * the request if the link is already down.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait up to IPC_MODEM_UNINIT_TIMEOUT_MS for the modem to
		 * uninitialize the protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

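/* Tear down the IPC stack: stop all timers, deinitialize the subsystems and
 * release the shared resources.
 */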
void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_trace_deinit(ipc_imem->trace);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	if (ipc_imem->ipc_devlink)
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the semaphore for the blocking read UL/DL transfer. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* Either CP shall be in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
	enum ipc_mem_exec_stage stage;

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->ev_cdev_write_pending = false;
	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create the tasklet for event handling. */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
		/* Alloc and Register devlink */
		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
		if (!ipc_imem->ipc_devlink) {
			dev_err(ipc_imem->dev, "Devlink register failed");
			goto imem_config_fail;
		}

		if (ipc_flash_link_establish(ipc_imem))
			goto devlink_channel_fail;
	}
	return ipc_imem;
devlink_channel_fail:
	ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}

void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

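/* Suspend or resume the use of the TD update timer. */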
void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}

/* Verify the CP execution state, copy the chip info,
 * change the execution phase to ROM.
 */
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
						 int arg, void *msg,
						 size_t msgsize)
{
	enum ipc_mem_exec_stage stage;
	struct sk_buff *skb;
	int rc = -EINVAL;
	size_t size;

	/* Test the CP execution state. */
	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
		dev_err(ipc_imem->dev,
			"Execution_stage: expected BOOT, received = %X", stage);
		goto trigger_chip_info_fail;
	}
	/* Allocate a new skb for the chip info. */
	size = ipc_imem->mmio->chip_info_size;
	if (size > IOSM_CHIP_INFO_SIZE_MAX)
		goto trigger_chip_info_fail;

	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
	if (!skb) {
		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
		rc = -ENOMEM;
		goto trigger_chip_info_fail;
	}
	/* Copy the chip info characters into the ipc_skb. */
	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
	/* First change to the ROM boot phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
	rc = 0;
trigger_chip_info_fail:
	return rc;
}

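/* Queue the chip-info transfer to run in tasklet context. */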
int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem,
					ipc_imem_devlink_trigger_chip_info_cb,
					0, NULL, 0, true);
}