xref: /linux/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c (revision 41fb0cf1bced59c1fe178cf6cc9f716b5da9e40e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-21 Intel Corporation.
4  */
5 
6 #include <linux/delay.h>
7 
8 #include "iosm_ipc_chnl_cfg.h"
9 #include "iosm_ipc_devlink.h"
10 #include "iosm_ipc_imem.h"
11 #include "iosm_ipc_imem_ops.h"
12 #include "iosm_ipc_port.h"
13 #include "iosm_ipc_task_queue.h"
14 #include "iosm_ipc_trace.h"
15 
16 /* Open a packet data online channel between the network layer and CP. */
17 int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
18 {
19 	dev_dbg(ipc_imem->dev, "%s if id: %d",
20 		ipc_imem_phase_get_string(ipc_imem->phase), if_id);
21 
22 	/* The network interface is only supported in the runtime phase. */
23 	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
24 		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
25 			ipc_imem_phase_get_string(ipc_imem->phase));
26 		return -EIO;
27 	}
28 
29 	return ipc_mux_open_session(ipc_imem->mux, if_id);
30 }
31 
32 /* Release a net link to CP. */
33 void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
34 			     int channel_id)
35 {
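	/* Only IP MUX sessions need an explicit close towards CP; other
	 * interface ids are ignored here.
	 */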
36 	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
37 	    if_id <= IP_MUX_SESSION_END)
38 		ipc_mux_close_session(ipc_imem->mux, if_id);
39 }
40 
41 /* Tasklet call to do uplink transfer. */
42 static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
43 				  void *msg, size_t size)
44 {
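	/* Clear the pending flag so that a new write request can queue the
	 * tasklet again, then push the accumulated uplink data.
	 */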
45 	ipc_imem->ev_cdev_write_pending = false;
46 	ipc_imem_ul_send(ipc_imem);
47 
48 	return 0;
49 }
50 
51 /* Trigger the sio write through the tasklet. */
52 static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
53 {
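	/* Coalesce requests: if a write event is already pending in the
	 * tasklet queue, do not schedule another one.
	 */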
54 	if (ipc_imem->ev_cdev_write_pending)
55 		return -1;
56 
57 	ipc_imem->ev_cdev_write_pending = true;
58 
59 	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
60 					NULL, 0, false);
61 }
62 
63 /* Function to transfer UL data */
64 int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
65 			       int if_id, int channel_id, struct sk_buff *skb)
66 {
67 	int ret = -EINVAL;
68 
69 	if (!ipc_imem || channel_id < 0)
70 		goto out;
71 
72 	/* Is CP Running? */
73 	if (ipc_imem->phase != IPC_P_RUN) {
74 		dev_dbg(ipc_imem->dev, "phase %s transmit",
75 			ipc_imem_phase_get_string(ipc_imem->phase));
76 		ret = -EIO;
77 		goto out;
78 	}
79 
80 	/* Route the UL packet through IP MUX Layer */
81 	ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
82 out:
83 	return ret;
84 }
85 
86 /* Initialize wwan channel */
87 void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
88 				enum ipc_mux_protocol mux_type)
89 {
90 	struct ipc_chnl_cfg chnl_cfg = { 0 };
91 
92 	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
93 
94 	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
95 	if (ipc_imem->cp_version == -1) {
96 		dev_err(ipc_imem->dev, "invalid CP version");
97 		return;
98 	}
99 
100 	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
101 	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
102 			      IRQ_MOD_OFF);
103 
104 	/* WWAN registration. */
105 	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
106 	if (!ipc_imem->wwan)
107 		dev_err(ipc_imem->dev,
108 			"failed to register the ipc_wwan interfaces");
109 }
110 
111 /**
112  * ipc_imem_trace_channel_init - Initializes trace channel.
113  * @ipc_imem:          Pointer to iosm_imem struct.
114  *
115  * Returns: Pointer to trace instance on success else NULL
116  */
117 struct iosm_trace *ipc_imem_trace_channel_init(struct iosm_imem *ipc_imem)
118 {
119 	struct ipc_chnl_cfg chnl_cfg = { 0 };
120 
121 	ipc_chnl_cfg_get(&chnl_cfg, IPC_MEM_CTRL_CHL_ID_3);
122 	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg,
123 			      IRQ_MOD_OFF);
124 
125 	return ipc_trace_init(ipc_imem);
126 }
127 
128 /* Map SKB to DMA for transfer */
129 static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
130 				   struct sk_buff *skb)
131 {
132 	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
133 	char *buf = skb->data;
134 	int len = skb->len;
135 	dma_addr_t mapping;
136 	int ret;
137 
138 	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
139 
140 	if (ret)
141 		goto err;
142 
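	/* The per-skb IPC metadata (IPC_CB) must fit into skb->cb. */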
143 	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
144 
145 	IPC_CB(skb)->mapping = mapping;
146 	IPC_CB(skb)->direction = DMA_TO_DEVICE;
147 	IPC_CB(skb)->len = len;
148 	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
149 
150 err:
151 	return ret;
152 }
153 
154 /* Return true if the channel is ready for use. */
155 static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
156 				       struct ipc_mem_channel *channel)
157 {
158 	enum ipc_phase phase;
159 
160 	/* Update the current operation phase. */
161 	phase = ipc_imem->phase;
162 
163 	/* Select the operation depending on the execution stage. */
164 	switch (phase) {
165 	case IPC_P_RUN:
166 	case IPC_P_PSI:
167 	case IPC_P_EBL:
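		/* In these phases the channel must additionally be fully
		 * active, which is checked after the switch statement.
		 */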
168 		break;
169 
170 	case IPC_P_ROM:
171 		/* Prepare the PSI image for the CP ROM driver and
172 		 * suspend the flash app.
173 		 */
174 		if (channel->state != IMEM_CHANNEL_RESERVED) {
175 			dev_err(ipc_imem->dev,
176 				"ch[%d]:invalid channel state %d,expected %d",
177 				channel->channel_id, channel->state,
178 				IMEM_CHANNEL_RESERVED);
179 			goto channel_unavailable;
180 		}
181 		goto channel_available;
182 
183 	default:
184 		/* Ignore uplink actions in all other phases. */
185 		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
186 			channel->channel_id, phase);
187 		goto channel_unavailable;
188 	}
189 	/* Check the full availability of the channel. */
190 	if (channel->state != IMEM_CHANNEL_ACTIVE) {
191 		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
192 			channel->channel_id, channel->state);
193 		goto channel_unavailable;
194 	}
195 
196 channel_available:
197 	return true;
198 
199 channel_unavailable:
200 	return false;
201 }
202 
203 /**
204  * ipc_imem_sys_port_close - Release a sio link to CP.
205  * @ipc_imem:          Imem instance.
206  * @channel:           Channel instance.
207  */
208 void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
209 			     struct ipc_mem_channel *channel)
210 {
211 	enum ipc_phase curr_phase;
212 	int status = 0;
213 	u32 tail = 0;
214 
215 	curr_phase = ipc_imem->phase;
216 
217 	/* If the current phase is IPC_P_OFF or the SIO ID is negative, the
218 	 * channel is already freed. Nothing to do.
219 	 */
220 	if (curr_phase == IPC_P_OFF) {
221 		dev_err(ipc_imem->dev,
222 			"nothing to do. Current Phase: %s",
223 			ipc_imem_phase_get_string(curr_phase));
224 		return;
225 	}
226 
227 	if (channel->state == IMEM_CHANNEL_FREE) {
228 		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
229 			channel->channel_id, channel->state);
230 		return;
231 	}
232 
233 	/* If there are any pending TDs, wait for timeout/completion before
234 	 * closing the pipe.
235 	 */
236 	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
237 		ipc_imem->app_notify_ul_pend = 1;
238 
239 		/* Suspend the user app and wait a certain time for processing
240 		 * UL Data.
241 		 */
242 		status = wait_for_completion_interruptible_timeout
243 			 (&ipc_imem->ul_pend_sem,
244 			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
245 		if (status == 0) {
246 			dev_dbg(ipc_imem->dev,
247 				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
248 				channel->ul_pipe.pipe_nr,
249 				channel->ul_pipe.old_head,
250 				channel->ul_pipe.old_tail);
251 		}
252 
253 		ipc_imem->app_notify_ul_pend = 0;
254 	}
255 
256 	/* If there are any pending TDs on the DL pipe, wait for
257 	 * timeout/completion before closing the pipe.
258 	 */
259 	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
260 					 &channel->dl_pipe, NULL, &tail);
261 
262 	if (tail != channel->dl_pipe.old_tail) {
263 		ipc_imem->app_notify_dl_pend = 1;
264 
265 		/* Suspend the user app and wait a certain time for processing
266 		 * DL Data.
267 		 */
268 		status = wait_for_completion_interruptible_timeout
269 			 (&ipc_imem->dl_pend_sem,
270 			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
271 		if (status == 0) {
272 			dev_dbg(ipc_imem->dev,
273 				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
274 				channel->dl_pipe.pipe_nr,
275 				channel->dl_pipe.old_head,
276 				channel->dl_pipe.old_tail);
277 		}
278 
279 		ipc_imem->app_notify_dl_pend = 0;
280 	}
281 
282 	/* Due to the wait for completion in messages, there is a small window
283 	 * between closing the pipe and marking the channel as closed. In this
284 	 * small window there could be an HP update from the host driver. Hence
285 	 * set the channel state to CLOSING to avoid an unnecessary interrupt
286 	 * towards CP.
287 	 */
288 	channel->state = IMEM_CHANNEL_CLOSING;
289 
290 	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
291 	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
292 
293 	ipc_imem_channel_free(channel);
294 }
295 
296 /* Open a PORT link to CP and return the channel */
297 struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
298 					       int chl_id, int hp_id)
299 {
300 	struct ipc_mem_channel *channel;
301 	int ch_id;
302 
303 	/* The PORT interface is only supported in the runtime phase. */
304 	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
305 		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
306 			ipc_imem_phase_get_string(ipc_imem->phase));
307 		return NULL;
308 	}
309 
310 	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
311 
312 	if (ch_id < 0) {
313 		dev_err(ipc_imem->dev, "reservation of a PORT chnl id failed");
314 		return NULL;
315 	}
316 
317 	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
318 
319 	if (!channel) {
320 		dev_err(ipc_imem->dev, "PORT channel id open failed");
321 		return NULL;
322 	}
323 
324 	return channel;
325 }
326 
327 /* transfer skb to modem */
328 int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
329 {
330 	struct ipc_mem_channel *channel = ipc_cdev->channel;
331 	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
332 	int ret = -EIO;
333 
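	/* Reject the write if the channel is not usable or a device shutdown
	 * has been requested.
	 */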
334 	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
335 	    ipc_imem->phase == IPC_P_OFF_REQ)
336 		goto out;
337 
338 	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
339 
340 	if (ret)
341 		goto out;
342 
343 	/* Add skb to the uplink skbuf accumulator. */
344 	skb_queue_tail(&channel->ul_list, skb);
345 
346 	ret = ipc_imem_call_cdev_write(ipc_imem);
347 
348 	if (ret) {
349 		skb_dequeue_tail(&channel->ul_list);
350 		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
351 			ipc_cdev->channel->channel_id);
352 	}
353 out:
354 	return ret;
355 }
356 
357 /* Open a SIO link to CP and return the channel instance */
358 struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
359 {
360 	struct ipc_mem_channel *channel;
361 	enum ipc_phase phase;
362 	int channel_id;
363 
364 	phase = ipc_imem_phase_update(ipc_imem);
365 	switch (phase) {
366 	case IPC_P_OFF:
367 	case IPC_P_ROM:
368 		/* Get a channel id as flash id and reserve it. */
369 		channel_id = ipc_imem_channel_alloc(ipc_imem,
370 						    IPC_MEM_CTRL_CHL_ID_7,
371 						    IPC_CTYPE_CTRL);
372 
373 		if (channel_id < 0) {
374 			dev_err(ipc_imem->dev,
375 				"reservation of a flash channel id failed");
376 			goto error;
377 		}
378 
379 		ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
380 		channel = &ipc_imem->channels[channel_id];
381 
382 		/* Enqueue chip info data to be read */
383 		if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
384 			dev_err(ipc_imem->dev, "Enqueue of chip info failed");
385 			channel->state = IMEM_CHANNEL_FREE;
386 			goto error;
387 		}
388 
389 		return channel;
390 
391 	case IPC_P_PSI:
392 	case IPC_P_EBL:
393 		ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
394 		if (ipc_imem->cp_version == -1) {
395 			dev_err(ipc_imem->dev, "invalid CP version");
396 			goto error;
397 		}
398 
399 		channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
400 		return ipc_imem_channel_open(ipc_imem, channel_id,
401 					     IPC_HP_CDEV_OPEN);
402 
403 	default:
404 		/* CP is in the wrong state (e.g. CRASH or CD_READY) */
405 		dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
406 	}
407 error:
408 	return NULL;
409 }
410 
411 /* Release a SIO channel link to CP. */
412 void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
413 {
414 	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
415 	int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
416 	enum ipc_mem_exec_stage exec_stage;
417 	struct ipc_mem_channel *channel;
418 	int status = 0;
419 	u32 tail = 0;
420 
421 	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
422 	/* Wait up to boot_check_timeout for CP to reach the RUN or PSI stage */
423 	do {
424 		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
425 		if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
426 		    exec_stage == IPC_MEM_EXEC_STAGE_PSI)
427 			break;
428 		msleep(20);
429 		boot_check_timeout -= 20;
430 	} while (boot_check_timeout > 0);
431 
432 	/* If there are any pending TDs, wait for timeout/completion before
433 	 * closing the pipe.
434 	 */
435 	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
436 		status = wait_for_completion_interruptible_timeout
437 			(&ipc_imem->ul_pend_sem,
438 			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
439 		if (status == 0) {
440 			dev_dbg(ipc_imem->dev,
441 				"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
442 				channel->ul_pipe.pipe_nr,
443 				channel->ul_pipe.old_head,
444 				channel->ul_pipe.old_tail);
445 		}
446 	}
447 
448 	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
449 					 &channel->dl_pipe, NULL, &tail);
450 
451 	if (tail != channel->dl_pipe.old_tail) {
452 		status = wait_for_completion_interruptible_timeout
453 			(&ipc_imem->dl_pend_sem,
454 			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
455 		if (status == 0) {
456 			dev_dbg(ipc_imem->dev,
457 				"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
458 				channel->dl_pipe.pipe_nr,
459 				channel->dl_pipe.old_head,
460 				channel->dl_pipe.old_tail);
461 		}
462 	}
463 
464 	/* Due to the wait for completion in messages, there is a small window
465 	 * between closing the pipe and marking the channel as closed. In this
466 	 * small window there could be an HP update from the host driver. Hence
467 	 * set the channel state to CLOSING to avoid an unnecessary interrupt
468 	 * towards CP.
469 	 */
470 	channel->state = IMEM_CHANNEL_CLOSING;
471 	/* Release the pipe resources */
472 	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
473 	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
474 }
475 
476 void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
477 				    struct sk_buff *skb)
478 {
479 	skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
480 	complete(&ipc_devlink->devlink_sio.read_sem);
481 }
482 
483 /* PSI transfer */
484 static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
485 				     struct ipc_mem_channel *channel,
486 				     unsigned char *buf, int count)
487 {
488 	int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
489 	enum ipc_mem_exec_stage exec_stage;
490 
491 	dma_addr_t mapping = 0;
492 	int ret;
493 
494 	ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
495 				DMA_TO_DEVICE);
496 	if (ret)
497 		goto pcie_addr_map_fail;
498 
499 	/* Save the PSI information for the CP ROM driver on the doorbell
500 	 * scratchpad.
501 	 */
502 	ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
503 	ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
504 
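	/* Wait for CP to acknowledge the PSI transfer (signalled via ul_sem). */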
505 	ret = wait_for_completion_interruptible_timeout
506 		(&channel->ul_sem,
507 		 msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
508 
509 	if (ret <= 0) {
510 		dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
511 			ret);
512 		goto psi_transfer_fail;
513 	}
514 	/* If the PSI download fails, return the CP boot ROM exit code */
515 	if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
516 	    ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
517 		ret = (-1) * ((int)ipc_imem->rom_exit_code);
518 		goto psi_transfer_fail;
519 	}
520 
521 	dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");
522 
523 	/* Wait psi_start_timeout milliseconds until the CP PSI image is
524 	 * running and updates the execution_stage field with
525 	 * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
526 	 */
527 	do {
528 		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
529 
530 		if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
531 			break;
532 
533 		msleep(20);
534 		psi_start_timeout -= 20;
535 	} while (psi_start_timeout > 0);
536 
537 	if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
538 		goto psi_transfer_fail; /* Unknown status of CP PSI process. */
539 
540 	ipc_imem->phase = IPC_P_PSI;
541 
542 	/* Enter the PSI phase. */
543 	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);
544 
545 	/* Request the RUNNING state from CP and wait until it is reached
546 	 * or a timeout occurs.
547 	 */
548 	ipc_imem_ipc_init_check(ipc_imem);
549 
550 	ret = wait_for_completion_interruptible_timeout
551 		(&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
552 	if (ret <= 0) {
553 		dev_err(ipc_imem->dev,
554 			"Failed PSI RUNNING state on CP, Error-%d", ret);
555 		goto psi_transfer_fail;
556 	}
557 
558 	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
559 			IPC_MEM_DEVICE_IPC_RUNNING) {
560 		dev_err(ipc_imem->dev,
561 			"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
562 			channel->channel_id,
563 			ipc_imem_phase_get_string(ipc_imem->phase),
564 			ipc_mmio_get_ipc_state(ipc_imem->mmio));
565 
566 		goto psi_transfer_fail;
567 	}
568 
569 	/* Create the flash channel for the transfer of the images. */
570 	if (!ipc_imem_sys_devlink_open(ipc_imem)) {
571 		dev_err(ipc_imem->dev, "can't open flash_channel");
572 		goto psi_transfer_fail;
573 	}
574 
575 	ret = 0;
576 psi_transfer_fail:
577 	ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
578 pcie_addr_map_fail:
579 	return ret;
580 }
581 
582 int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
583 			       unsigned char *buf, int count)
584 {
585 	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
586 	struct ipc_mem_channel *channel;
587 	struct sk_buff *skb;
588 	dma_addr_t mapping;
589 	int ret;
590 
591 	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
592 
593 	/* In the ROM phase the PSI image is passed to CP via a dedicated
594 	 * shared memory area and the doorbell scratchpad directly.
595 	 */
596 	if (ipc_imem->phase == IPC_P_ROM) {
597 		ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
598 		/* If the PSI transfer fails, send the crash
599 		 * signature.
600 		 */
601 		if (ret > 0)
602 			ipc_imem_msg_send_feature_set(ipc_imem,
603 						      IPC_MEM_INBAND_CRASH_SIG,
604 						      false);
605 		goto out;
606 	}
607 
608 	/* Allocate skb memory for the uplink buffer. */
609 	skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
610 				 DMA_TO_DEVICE, 0);
611 	if (!skb) {
612 		ret = -ENOMEM;
613 		goto out;
614 	}
615 
616 	memcpy(skb_put(skb, count), buf, count);
617 
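	/* Mark the skb as a blocking user operation so that the caller can
	 * wait below for the CP confirmation.
	 */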
618 	IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
619 
620 	/* Add skb to the uplink skbuf accumulator. */
621 	skb_queue_tail(&channel->ul_list, skb);
622 
623 	/* Inform the IPC tasklet to pass uplink IP packets to CP. */
624 	if (!ipc_imem_call_cdev_write(ipc_imem)) {
625 		ret = wait_for_completion_interruptible(&channel->ul_sem);
626 
627 		if (ret < 0) {
628 			dev_err(ipc_imem->dev,
629 				"ch[%d] no CP confirmation, status = %d",
630 				channel->channel_id, ret);
631 			ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
632 			goto out;
633 		}
634 	}
635 	ret = 0;
636 out:
637 	return ret;
638 }
639 
640 int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
641 			      u32 bytes_to_read, u32 *bytes_read)
642 {
643 	struct sk_buff *skb = NULL;
644 	int rc = 0;
645 
646 	/* Check whether an skb is available in rx_list, otherwise wait for one. */
647 	devlink->devlink_sio.devlink_read_pend = 1;
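	/* A read is now pending; the flag is cleared once an skb has been
	 * dequeued from rx_list.
	 */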
648 	while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
649 		if (!wait_for_completion_interruptible_timeout
650 				(&devlink->devlink_sio.read_sem,
651 				 msecs_to_jiffies(IPC_READ_TIMEOUT))) {
652 			dev_err(devlink->dev, "Read timed out");
653 			rc = -ETIMEDOUT;
654 			goto devlink_read_fail;
655 		}
656 	}
657 	devlink->devlink_sio.devlink_read_pend = 0;
658 	if (bytes_to_read < skb->len) {
659 		dev_err(devlink->dev, "Invalid size, expected len %d", skb->len);
660 		rc = -EINVAL;
661 		goto devlink_read_fail;
662 	}
663 	*bytes_read = skb->len;
664 	memcpy(data, skb->data, skb->len);
665 
666 devlink_read_fail:
667 	dev_kfree_skb(skb);
668 	return rc;
669 }
670