xref: /linux/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-21 Intel Corporation.
4  */
5 
6 #include <linux/delay.h>
7 
8 #include "iosm_ipc_chnl_cfg.h"
9 #include "iosm_ipc_devlink.h"
10 #include "iosm_ipc_imem.h"
11 #include "iosm_ipc_imem_ops.h"
12 #include "iosm_ipc_port.h"
13 #include "iosm_ipc_task_queue.h"
14 
15 /* Open a packet data online channel between the network layer and CP. */
16 int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
17 {
18 	dev_dbg(ipc_imem->dev, "%s if id: %d",
19 		ipc_imem_phase_get_string(ipc_imem->phase), if_id);
20 
21 	/* The network interface is only supported in the runtime phase. */
22 	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
23 		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
24 			ipc_imem_phase_get_string(ipc_imem->phase));
25 		return -EIO;
26 	}
27 
28 	return ipc_mux_open_session(ipc_imem->mux, if_id);
29 }
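/*
 * Usage sketch (illustrative only, not part of the driver): how a
 * hypothetical net-device open path could bring up a MUX session via
 * ipc_imem_sys_wwan_open(). The wrapper name and the way the returned
 * session id is stored are assumptions for this example.
 */
static int example_wwan_ndo_open(struct iosm_imem *ipc_imem, int if_id,
				 int *out_id)
{
	int id = ipc_imem_sys_wwan_open(ipc_imem, if_id);

	if (id < 0)		/* e.g. -EIO outside the RUN phase */
		return id;

	*out_id = id;		/* remembered for the matching close call */
	return 0;
}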
30 
31 /* Release a net link to CP. */
32 void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
33 			     int channel_id)
34 {
35 	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
36 	    if_id <= IP_MUX_SESSION_END)
37 		ipc_mux_close_session(ipc_imem->mux, if_id);
38 }
39 
40 /* Tasklet call to do uplink transfer. */
41 static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
42 				  void *msg, size_t size)
43 {
44 	ipc_imem_ul_send(ipc_imem);
45 
46 	return 0;
47 }
48 
49 /* Trigger the SIO write through the IPC tasklet. */
50 static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
51 {
52 	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
53 					NULL, 0, false);
54 }
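/*
 * Illustrative sketch of the task-queue indirection used above: work that
 * must run in tasklet context is posted through ipc_task_queue_send_task()
 * with a callback of the shape below. The example names are hypothetical.
 */
static int example_tq_callback(struct iosm_imem *ipc_imem, int arg,
			       void *msg, size_t size)
{
	/* Runs in tasklet context on behalf of the caller below. */
	return 0;
}

static int example_post_to_tasklet(struct iosm_imem *ipc_imem)
{
	/* arg/msg/size are unused here; "false" means do not wait. */
	return ipc_task_queue_send_task(ipc_imem, example_tq_callback, 0,
					NULL, 0, false);
}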
55 
56 /* Function to transfer UL data */
57 int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
58 			       int if_id, int channel_id, struct sk_buff *skb)
59 {
60 	int ret = -EINVAL;
61 
62 	if (!ipc_imem || channel_id < 0)
63 		goto out;
64 
65 	/* Is CP Running? */
66 	if (ipc_imem->phase != IPC_P_RUN) {
67 		dev_dbg(ipc_imem->dev, "phase %s transmit",
68 			ipc_imem_phase_get_string(ipc_imem->phase));
69 		ret = -EIO;
70 		goto out;
71 	}
72 
73 	/* Route the UL packet through IP MUX Layer */
74 	ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
75 out:
76 	return ret;
77 }
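/*
 * Usage sketch (an assumption, not the in-tree wwan xmit path): a transmit
 * hook would typically keep the skb and retry later when the MUX reports a
 * busy condition, and drop it on any other error.
 */
static int example_wwan_xmit(struct iosm_imem *ipc_imem, int if_id,
			     int ch_id, struct sk_buff *skb)
{
	int ret = ipc_imem_sys_wwan_transmit(ipc_imem, if_id, ch_id, skb);

	if (ret == -EBUSY)	/* assumed busy indication: caller retries */
		return ret;

	if (ret < 0)
		dev_kfree_skb_any(skb);	/* unrecoverable: drop the packet */

	return ret;
}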
78 
79 /* Initialize wwan channel */
80 int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
81 			       enum ipc_mux_protocol mux_type)
82 {
83 	struct ipc_chnl_cfg chnl_cfg = { 0 };
84 
85 	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
86 
87 	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
88 	if (ipc_imem->cp_version == -1) {
89 		dev_err(ipc_imem->dev, "invalid CP version");
90 		return -EIO;
91 	}
92 
93 	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
94 
95 	if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION &&
96 	    ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) {
97 		chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
98 		chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL;
99 		chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE;
100 	}
101 
102 	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
103 			      IRQ_MOD_OFF);
104 
105 	/* WWAN registration. */
106 	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
107 	if (!ipc_imem->wwan) {
108 		dev_err(ipc_imem->dev,
109 			"failed to register the ipc_wwan interfaces");
110 		return -ENOMEM;
111 	}
112 
113 	return 0;
114 }
115 
116 /* Map SKB to DMA for transfer */
117 static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
118 				   struct sk_buff *skb)
119 {
120 	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
121 	char *buf = skb->data;
122 	int len = skb->len;
123 	dma_addr_t mapping;
124 	int ret;
125 
126 	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
127 
128 	if (ret)
129 		goto err;
130 
131 	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
132 
133 	IPC_CB(skb)->mapping = mapping;
134 	IPC_CB(skb)->direction = DMA_TO_DEVICE;
135 	IPC_CB(skb)->len = len;
136 	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
137 
138 err:
139 	return ret;
140 }
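/*
 * Complementary sketch (assumption: such a helper would live in the TD
 * completion path): the fields stashed in IPC_CB(skb) above are exactly
 * what is needed to undo the mapping once the transfer has completed.
 */
static void example_unmap_skb(struct iosm_imem *ipc_imem, struct sk_buff *skb)
{
	ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
			    IPC_CB(skb)->mapping, IPC_CB(skb)->direction);
}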
141 
142 /* Return true if the channel is ready for use. */
143 static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
144 				       struct ipc_mem_channel *channel)
145 {
146 	enum ipc_phase phase;
147 
148 	/* Update the current operation phase. */
149 	phase = ipc_imem->phase;
150 
151 	/* Select the operation depending on the execution stage. */
152 	switch (phase) {
153 	case IPC_P_RUN:
154 	case IPC_P_PSI:
155 	case IPC_P_EBL:
156 		break;
157 
158 	case IPC_P_ROM:
159 		/* Prepare the PSI image for the CP ROM driver and
160 		 * suspend the flash app.
161 		 */
162 		if (channel->state != IMEM_CHANNEL_RESERVED) {
163 			dev_err(ipc_imem->dev,
164 				"ch[%d]:invalid channel state %d,expected %d",
165 				channel->channel_id, channel->state,
166 				IMEM_CHANNEL_RESERVED);
167 			goto channel_unavailable;
168 		}
169 		goto channel_available;
170 
171 	default:
172 		/* Ignore uplink actions in all other phases. */
173 		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
174 			channel->channel_id, phase);
175 		goto channel_unavailable;
176 	}
177 	/* Check the full availability of the channel. */
178 	if (channel->state != IMEM_CHANNEL_ACTIVE) {
179 		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
180 			channel->channel_id, channel->state);
181 		goto channel_unavailable;
182 	}
183 
184 channel_available:
185 	return true;
186 
187 channel_unavailable:
188 	return false;
189 }
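/*
 * Illustrative guard (hypothetical helper): callers such as
 * ipc_imem_sys_cdev_write() below combine this check with the phase to
 * reject uplink traffic early.
 */
static bool example_can_send(struct iosm_imem *ipc_imem,
			     struct ipc_mem_channel *channel)
{
	return ipc_imem_is_channel_active(ipc_imem, channel) &&
	       ipc_imem->phase != IPC_P_OFF_REQ;
}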
190 
191 /**
192  * ipc_imem_sys_port_close - Release a sio link to CP.
193  * @ipc_imem:          Imem instance.
194  * @channel:           Channel instance.
195  */
196 void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
197 			     struct ipc_mem_channel *channel)
198 {
199 	enum ipc_phase curr_phase;
200 	int status = 0;
201 	u32 tail = 0;
202 
203 	curr_phase = ipc_imem->phase;
204 
205 	/* If the current phase is IPC_P_OFF or the SIO ID is negative then
206 	 * the channel is already freed. Nothing to do.
207 	 */
208 	if (curr_phase == IPC_P_OFF) {
209 		dev_err(ipc_imem->dev,
210 			"nothing to do. Current Phase: %s",
211 			ipc_imem_phase_get_string(curr_phase));
212 		return;
213 	}
214 
215 	if (channel->state == IMEM_CHANNEL_FREE) {
216 		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
217 			channel->channel_id, channel->state);
218 		return;
219 	}
220 
221 	/* If there are any pending UL TDs then wait for timeout/completion
222 	 * before closing the pipe.
223 	 */
224 	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
225 		ipc_imem->app_notify_ul_pend = 1;
226 
227 		/* Suspend the user app and wait a certain time for processing
228 		 * UL Data.
229 		 */
230 		status = wait_for_completion_interruptible_timeout
231 			 (&ipc_imem->ul_pend_sem,
232 			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
233 		if (status == 0) {
234 			dev_dbg(ipc_imem->dev,
235 				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
236 				channel->ul_pipe.pipe_nr,
237 				channel->ul_pipe.old_head,
238 				channel->ul_pipe.old_tail);
239 		}
240 
241 		ipc_imem->app_notify_ul_pend = 0;
242 	}
243 
244 	/* If there are any pending DL TDs then wait for timeout/completion
245 	 * before closing the pipe.
246 	 */
247 	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
248 					 &channel->dl_pipe, NULL, &tail);
249 
250 	if (tail != channel->dl_pipe.old_tail) {
251 		ipc_imem->app_notify_dl_pend = 1;
252 
253 		/* Suspend the user app and wait a certain time for processing
254 		 * DL Data.
255 		 */
256 		status = wait_for_completion_interruptible_timeout
257 			 (&ipc_imem->dl_pend_sem,
258 			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
259 		if (status == 0) {
260 			dev_dbg(ipc_imem->dev,
261 				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
262 				channel->dl_pipe.pipe_nr,
263 				channel->dl_pipe.old_head,
264 				channel->dl_pipe.old_tail);
265 		}
266 
267 		ipc_imem->app_notify_dl_pend = 0;
268 	}
269 
270 	/* Due to the wait for completion in messages, there is a small window
271 	 * between closing the pipe and marking the channel as closed. In this
272 	 * small window there could be an HP update from the host driver. Hence
273 	 * set the channel state to CLOSING to avoid unnecessary interrupts
274 	 * towards CP.
275 	 */
276 	channel->state = IMEM_CHANNEL_CLOSING;
277 
278 	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
279 	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
280 
281 	ipc_imem_channel_free(channel);
282 }
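/*
 * Sketch of the pending-data wait pattern used twice above (and again in
 * ipc_imem_sys_devlink_close()); the helper name and the direction string
 * are assumptions made for illustration.
 */
static void example_wait_for_pending_data(struct iosm_imem *ipc_imem,
					  struct completion *pend_sem,
					  const char *dir)
{
	long status;

	status = wait_for_completion_interruptible_timeout
		 (pend_sem, msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
	if (status == 0)
		dev_dbg(ipc_imem->dev, "pend data timeout on %s pipe", dir);
}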
283 
284 /* Open a PORT link to CP and return the channel */
285 struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
286 					       int chl_id, int hp_id)
287 {
288 	struct ipc_mem_channel *channel;
289 	int ch_id;
290 
291 	/* The PORT interface is only supported in the runtime phase. */
292 	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
293 		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
294 			ipc_imem_phase_get_string(ipc_imem->phase));
295 		return NULL;
296 	}
297 
298 	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
299 
300 	if (ch_id < 0) {
301 		dev_err(ipc_imem->dev, "reservation of a PORT channel id failed");
302 		return NULL;
303 	}
304 
305 	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
306 
307 	if (!channel) {
308 		dev_err(ipc_imem->dev, "PORT channel id open failed");
309 		return NULL;
310 	}
311 
312 	return channel;
313 }
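/*
 * Usage sketch (illustrative): an open/close round trip as a control-port
 * driver might perform it. The channel id and head-pointer id chosen here
 * are assumptions for the example.
 */
static void example_port_roundtrip(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem_sys_port_open(ipc_imem, IPC_MEM_CTRL_CHL_ID_7,
					 IPC_HP_CDEV_OPEN);
	if (!channel)
		return;

	/* ... exchange control data, e.g. via ipc_imem_sys_cdev_write() ... */

	ipc_imem_sys_port_close(ipc_imem, channel);
}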
314 
315 /* Transfer an skb to the modem. */
316 int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
317 {
318 	struct ipc_mem_channel *channel = ipc_cdev->channel;
319 	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
320 	int ret = -EIO;
321 
322 	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
323 	    ipc_imem->phase == IPC_P_OFF_REQ)
324 		goto out;
325 
326 	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
327 
328 	if (ret)
329 		goto out;
330 
331 	/* Add skb to the uplink skbuf accumulator. */
332 	skb_queue_tail(&channel->ul_list, skb);
333 
334 	ret = ipc_imem_call_cdev_write(ipc_imem);
335 
336 	if (ret) {
337 		skb_dequeue_tail(&channel->ul_list);
338 		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
339 			ipc_cdev->channel->channel_id);
340 	}
341 out:
342 	return ret;
343 }
344 
345 /* Open a SIO link to CP and return the channel instance */
346 struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
347 {
348 	struct ipc_mem_channel *channel;
349 	enum ipc_phase phase;
350 	int channel_id;
351 
352 	phase = ipc_imem_phase_update(ipc_imem);
353 	switch (phase) {
354 	case IPC_P_OFF:
355 	case IPC_P_ROM:
356 		/* Get a channel id as flash id and reserve it. */
357 		channel_id = ipc_imem_channel_alloc(ipc_imem,
358 						    IPC_MEM_CTRL_CHL_ID_7,
359 						    IPC_CTYPE_CTRL);
360 
361 		if (channel_id < 0) {
362 			dev_err(ipc_imem->dev,
363 				"reservation of a flash channel id failed");
364 			goto error;
365 		}
366 
367 		ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
368 		channel = &ipc_imem->channels[channel_id];
369 
370 		/* Enqueue chip info data to be read */
371 		if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
372 			dev_err(ipc_imem->dev, "Enqueue of chip info failed");
373 			channel->state = IMEM_CHANNEL_FREE;
374 			goto error;
375 		}
376 
377 		return channel;
378 
379 	case IPC_P_PSI:
380 	case IPC_P_EBL:
381 		ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
382 		if (ipc_imem->cp_version == -1) {
383 			dev_err(ipc_imem->dev, "invalid CP version");
384 			goto error;
385 		}
386 
387 		channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
388 		return ipc_imem_channel_open(ipc_imem, channel_id,
389 					     IPC_HP_CDEV_OPEN);
390 
391 	default:
392 		/* CP is in the wrong state (e.g. CRASH or CD_READY) */
393 		dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
394 	}
395 error:
396 	return NULL;
397 }
398 
399 /* Release a SIO channel link to CP. */
400 void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
401 {
402 	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
403 	int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
404 	enum ipc_mem_exec_stage exec_stage;
405 	struct ipc_mem_channel *channel;
406 	int status = 0;
407 	u32 tail = 0;
408 
409 	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
410 	/* Wait up to boot_check_timeout for CP to reach the RUN or PSI stage */
411 	do {
412 		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
413 		if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
414 		    exec_stage == IPC_MEM_EXEC_STAGE_PSI)
415 			break;
416 		msleep(20);
417 		boot_check_timeout -= 20;
418 	} while (boot_check_timeout > 0);
419 
420 	/* If there are any pending UL TDs then wait for timeout/completion
421 	 * before closing the pipe.
422 	 */
423 	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
424 		status = wait_for_completion_interruptible_timeout
425 			(&ipc_imem->ul_pend_sem,
426 			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
427 		if (status == 0) {
428 			dev_dbg(ipc_imem->dev,
429 				"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
430 				channel->ul_pipe.pipe_nr,
431 				channel->ul_pipe.old_head,
432 				channel->ul_pipe.old_tail);
433 		}
434 	}
435 
436 	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
437 					 &channel->dl_pipe, NULL, &tail);
438 
439 	if (tail != channel->dl_pipe.old_tail) {
440 		status = wait_for_completion_interruptible_timeout
441 			(&ipc_imem->dl_pend_sem,
442 			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
443 		if (status == 0) {
444 			dev_dbg(ipc_imem->dev,
445 				"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
446 				channel->dl_pipe.pipe_nr,
447 				channel->dl_pipe.old_head,
448 				channel->dl_pipe.old_tail);
449 		}
450 	}
451 
452 	/* Due to the wait for completion in messages, there is a small window
453 	 * between closing the pipe and marking the channel as closed. In this
454 	 * small window there could be an HP update from the host driver. Hence
455 	 * set the channel state to CLOSING to avoid unnecessary interrupts
456 	 * towards CP.
457 	 */
458 	channel->state = IMEM_CHANNEL_CLOSING;
459 	/* Release the pipe resources */
460 	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
461 	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
462 	ipc_imem->nr_of_channels--;
463 }
464 
465 void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
466 				    struct sk_buff *skb)
467 {
468 	skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
469 	complete(&ipc_devlink->devlink_sio.read_sem);
470 }
471 
472 /* PSI transfer */
473 static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
474 				     struct ipc_mem_channel *channel,
475 				     unsigned char *buf, int count)
476 {
477 	int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
478 	enum ipc_mem_exec_stage exec_stage;
479 
480 	dma_addr_t mapping = 0;
481 	int ret;
482 
483 	ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
484 				DMA_TO_DEVICE);
485 	if (ret)
486 		goto pcie_addr_map_fail;
487 
488 	/* Save the PSI information for the CP ROM driver on the doorbell
489 	 * scratchpad.
490 	 */
491 	ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
492 	ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
493 
494 	ret = wait_for_completion_interruptible_timeout
495 		(&channel->ul_sem,
496 		 msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
497 
498 	if (ret <= 0) {
499 		dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
500 			ret);
501 		goto psi_transfer_fail;
502 	}
503 	/* If the PSI download fails, return the CP boot ROM exit code */
504 	if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
505 	    ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
506 		ret = (-1) * ((int)ipc_imem->rom_exit_code);
507 		goto psi_transfer_fail;
508 	}
509 
510 	dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");
511 
512 	/* Wait psi_start_timeout milliseconds until the CP PSI image is
513 	 * running and updates the execution_stage field with
514 	 * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
515 	 */
516 	do {
517 		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
518 
519 		if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
520 			break;
521 
522 		msleep(20);
523 		psi_start_timeout -= 20;
524 	} while (psi_start_timeout > 0);
525 
526 	if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
527 		goto psi_transfer_fail; /* Unknown status of CP PSI process. */
528 
529 	ipc_imem->phase = IPC_P_PSI;
530 
531 	/* Enter the PSI phase. */
532 	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);
533 
534 	/* Request the RUNNING state from CP and wait until it is reached
535 	 * or the timeout expires.
536 	 */
537 	ipc_imem_ipc_init_check(ipc_imem);
538 
539 	ret = wait_for_completion_interruptible_timeout
540 		(&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
541 	if (ret <= 0) {
542 		dev_err(ipc_imem->dev,
543 			"Failed PSI RUNNING state on CP, Error-%d", ret);
544 		goto psi_transfer_fail;
545 	}
546 
547 	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
548 			IPC_MEM_DEVICE_IPC_RUNNING) {
549 		dev_err(ipc_imem->dev,
550 			"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
551 			channel->channel_id,
552 			ipc_imem_phase_get_string(ipc_imem->phase),
553 			ipc_mmio_get_ipc_state(ipc_imem->mmio));
554 
555 		goto psi_transfer_fail;
556 	}
557 
558 	/* Create the flash channel for the transfer of the images. */
559 	if (!ipc_imem_sys_devlink_open(ipc_imem)) {
560 		dev_err(ipc_imem->dev, "can't open flash_channel");
561 		goto psi_transfer_fail;
562 	}
563 
564 	ret = 0;
565 psi_transfer_fail:
566 	ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
567 pcie_addr_map_fail:
568 	return ret;
569 }
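/*
 * Sketch of the execution-stage polling pattern used above and in
 * ipc_imem_sys_devlink_close(); the helper name and the 20 ms step are
 * assumptions for illustration.
 */
static bool example_poll_exec_stage(struct iosm_imem *ipc_imem,
				    enum ipc_mem_exec_stage wanted,
				    int timeout_ms)
{
	do {
		if (ipc_mmio_get_exec_stage(ipc_imem->mmio) == wanted)
			return true;
		msleep(20);
		timeout_ms -= 20;
	} while (timeout_ms > 0);

	return false;
}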
570 
571 int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
572 			       unsigned char *buf, int count)
573 {
574 	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
575 	struct ipc_mem_channel *channel;
576 	struct sk_buff *skb;
577 	dma_addr_t mapping;
578 	int ret;
579 
580 	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
581 
582 	/* In the ROM phase the PSI image is passed to CP directly via a
583 	 * dedicated shared memory area and the doorbell scratchpad.
584 	 */
585 	if (ipc_imem->phase == IPC_P_ROM) {
586 		ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
587 		/* If the PSI transfer fails then send the crash
588 		 * signature.
589 		 */
590 		if (ret > 0)
591 			ipc_imem_msg_send_feature_set(ipc_imem,
592 						      IPC_MEM_INBAND_CRASH_SIG,
593 						      false);
594 		goto out;
595 	}
596 
597 	/* Allocate skb memory for the uplink buffer. */
598 	skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
599 				 DMA_TO_DEVICE, 0);
600 	if (!skb) {
601 		ret = -ENOMEM;
602 		goto out;
603 	}
604 
605 	skb_put_data(skb, buf, count);
606 
607 	IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
608 
609 	/* Add skb to the uplink skbuf accumulator. */
610 	skb_queue_tail(&channel->ul_list, skb);
611 
612 	/* Inform the IPC tasklet to pass uplink IP packets to CP. */
613 	if (!ipc_imem_call_cdev_write(ipc_imem)) {
614 		ret = wait_for_completion_interruptible(&channel->ul_sem);
615 
616 		if (ret < 0) {
617 			dev_err(ipc_imem->dev,
618 				"ch[%d] no CP confirmation, status = %d",
619 				channel->channel_id, ret);
620 			ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
621 			goto out;
622 		}
623 	}
624 	ret = 0;
625 out:
626 	return ret;
627 }
628 
629 int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
630 			      u32 bytes_to_read, u32 *bytes_read)
631 {
632 	struct sk_buff *skb = NULL;
633 	int rc = 0;
634 
635 	/* Check whether an skb is available in rx_list, otherwise wait for one */
636 	devlink->devlink_sio.devlink_read_pend = 1;
637 	while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
638 		if (!wait_for_completion_interruptible_timeout
639 				(&devlink->devlink_sio.read_sem,
640 				 msecs_to_jiffies(IPC_READ_TIMEOUT))) {
641 			dev_err(devlink->dev, "Read timed out");
642 			rc = -ETIMEDOUT;
643 			goto devlink_read_fail;
644 		}
645 	}
646 	devlink->devlink_sio.devlink_read_pend = 0;
647 	if (bytes_to_read < skb->len) {
648 		dev_err(devlink->dev, "Invalid size, expected len %d", skb->len);
649 		rc = -EINVAL;
650 		goto devlink_read_fail;
651 	}
652 	*bytes_read = skb->len;
653 	memcpy(data, skb->data, skb->len);
654 
655 devlink_read_fail:
656 	dev_kfree_skb(skb);
657 	return rc;
658 }
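/*
 * Usage sketch (illustrative): how a devlink flashing flow might push one
 * image chunk and collect the modem's response, pairing the write and read
 * helpers above. Buffer handling and sizes are assumptions.
 */
static int example_flash_chunk(struct iosm_devlink *ipc_devlink,
			       unsigned char *chunk, int len,
			       u8 *resp, u32 resp_size)
{
	u32 bytes_read = 0;
	int ret;

	ret = ipc_imem_sys_devlink_write(ipc_devlink, chunk, len);
	if (ret)
		return ret;

	return ipc_imem_sys_devlink_read(ipc_devlink, resp, resp_size,
					 &bytes_read);
}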
659