xref: /linux/drivers/net/wwan/iosm/iosm_ipc_mux.c (revision 64b14a184e83eb62ea0615e31a409956049d40e7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-21 Intel Corporation.
4  */
5 
6 #include "iosm_ipc_mux_codec.h"
7 
8 /* At the begin of the runtime phase the IP MUX channel shall created. */
9 static int ipc_mux_channel_create(struct iosm_mux *ipc_mux)
10 {
11 	int channel_id;
12 
13 	channel_id = ipc_imem_channel_alloc(ipc_mux->imem, ipc_mux->instance_id,
14 					    IPC_CTYPE_WWAN);
15 
16 	if (channel_id < 0) {
17 		dev_err(ipc_mux->dev,
18 			"allocation of the MUX channel id failed");
19 		ipc_mux->state = MUX_S_ERROR;
20 		ipc_mux->event = MUX_E_NOT_APPLICABLE;
21 		goto no_channel;
22 	}
23 
24 	/* Establish the MUX channel in blocking mode. */
25 	ipc_mux->channel = ipc_imem_channel_open(ipc_mux->imem, channel_id,
26 						 IPC_HP_NET_CHANNEL_INIT);
27 
28 	if (!ipc_mux->channel) {
29 		dev_err(ipc_mux->dev, "ipc_imem_channel_open failed");
30 		ipc_mux->state = MUX_S_ERROR;
31 		ipc_mux->event = MUX_E_NOT_APPLICABLE;
32 		return -ENODEV; /* MUX channel is not available. */
33 	}
34 
35 	/* Define the MUX active state properties. */
36 	ipc_mux->state = MUX_S_ACTIVE;
37 	ipc_mux->event = MUX_E_NO_ORDERS;
38 
39 no_channel:
40 	return channel_id;
41 }
42 
43 /* Reset the session/if id state. */
44 static void ipc_mux_session_free(struct iosm_mux *ipc_mux, int if_id)
45 {
46 	struct mux_session *if_entry;
47 
48 	if_entry = &ipc_mux->session[if_id];
49 	/* Reset the session state. */
50 	if_entry->wwan = NULL;
51 }
52 
53 /* Create and send the session open command. */
54 static struct mux_cmd_open_session_resp *
55 ipc_mux_session_open_send(struct iosm_mux *ipc_mux, int if_id)
56 {
57 	struct mux_cmd_open_session_resp *open_session_resp;
58 	struct mux_acb *acb = &ipc_mux->acb;
59 	union mux_cmd_param param;
60 
61 	/* open_session commands to one ACB and start transmission. */
62 	param.open_session.flow_ctrl = 0;
63 	param.open_session.ipv4v6_hints = 0;
64 	param.open_session.reserved2 = 0;
65 	param.open_session.dl_head_pad_len = cpu_to_le32(IPC_MEM_DL_ETH_OFFSET);
66 
67 	/* Finish and transfer ACB. The user thread is suspended.
68 	 * It is a blocking function call, until CP responds or timeout.
69 	 */
70 	acb->wanted_response = MUX_CMD_OPEN_SESSION_RESP;
71 	if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_OPEN_SESSION, if_id, 0,
72 				     &param, sizeof(param.open_session), true,
73 				 false) ||
74 	    acb->got_response != MUX_CMD_OPEN_SESSION_RESP) {
75 		dev_err(ipc_mux->dev, "if_id %d: OPEN_SESSION send failed",
76 			if_id);
77 		return NULL;
78 	}
79 
80 	open_session_resp = &ipc_mux->acb.got_param.open_session_resp;
81 	if (open_session_resp->response != cpu_to_le32(MUX_CMD_RESP_SUCCESS)) {
82 		dev_err(ipc_mux->dev,
83 			"if_id %d,session open failed,response=%d", if_id,
84 			open_session_resp->response);
85 		return NULL;
86 	}
87 
88 	return open_session_resp;
89 }
90 
91 /* Open the first IP session. */
92 static bool ipc_mux_session_open(struct iosm_mux *ipc_mux,
93 				 struct mux_session_open *session_open)
94 {
95 	struct mux_cmd_open_session_resp *open_session_resp;
96 	int if_id;
97 
98 	/* Search for a free session interface id. */
99 	if_id = le32_to_cpu(session_open->if_id);
100 	if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
101 		dev_err(ipc_mux->dev, "invalid interface id=%d", if_id);
102 		return false;
103 	}
104 
105 	/* Create and send the session open command.
106 	 * It is a blocking function call, until CP responds or timeout.
107 	 */
108 	open_session_resp = ipc_mux_session_open_send(ipc_mux, if_id);
109 	if (!open_session_resp) {
110 		ipc_mux_session_free(ipc_mux, if_id);
111 		session_open->if_id = cpu_to_le32(-1);
112 		return false;
113 	}
114 
115 	/* Initialize the uplink skb accumulator. */
116 	skb_queue_head_init(&ipc_mux->session[if_id].ul_list);
117 
118 	ipc_mux->session[if_id].dl_head_pad_len = IPC_MEM_DL_ETH_OFFSET;
119 	ipc_mux->session[if_id].ul_head_pad_len =
120 		le32_to_cpu(open_session_resp->ul_head_pad_len);
121 	ipc_mux->session[if_id].wwan = ipc_mux->wwan;
122 
123 	/* Reset the flow ctrl stats of the session */
124 	ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
125 	ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
126 	ipc_mux->session[if_id].ul_flow_credits = 0;
127 	ipc_mux->session[if_id].net_tx_stop = false;
128 	ipc_mux->session[if_id].flow_ctl_mask = 0;
129 
130 	/* Save and return the assigned if id. */
131 	session_open->if_id = cpu_to_le32(if_id);
132 	ipc_mux->nr_sessions++;
133 
134 	return true;
135 }
136 
137 /* Free pending session UL packet. */
138 static void ipc_mux_session_reset(struct iosm_mux *ipc_mux, int if_id)
139 {
140 	/* Reset the session/if id state. */
141 	ipc_mux_session_free(ipc_mux, if_id);
142 
143 	/* Empty the uplink skb accumulator. */
144 	skb_queue_purge(&ipc_mux->session[if_id].ul_list);
145 }
146 
147 static void ipc_mux_session_close(struct iosm_mux *ipc_mux,
148 				  struct mux_session_close *msg)
149 {
150 	int if_id;
151 
152 	/* Copy the session interface id. */
153 	if_id = le32_to_cpu(msg->if_id);
154 
155 	if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
156 		dev_err(ipc_mux->dev, "invalid session id %d", if_id);
157 		return;
158 	}
159 
160 	/* Create and send the session close command.
161 	 * It is a blocking function call, until CP responds or timeout.
162 	 */
163 	if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_CLOSE_SESSION, if_id, 0,
164 				     NULL, 0, true, false))
165 		dev_err(ipc_mux->dev, "if_id %d: CLOSE_SESSION send failed",
166 			if_id);
167 
168 	/* Reset the flow ctrl stats of the session */
169 	ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
170 	ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
171 	ipc_mux->session[if_id].flow_ctl_mask = 0;
172 
173 	ipc_mux_session_reset(ipc_mux, if_id);
174 	ipc_mux->nr_sessions--;
175 }
176 
177 static void ipc_mux_channel_close(struct iosm_mux *ipc_mux,
178 				  struct mux_channel_close *channel_close_p)
179 {
180 	int i;
181 
182 	/* Free pending session UL packet. */
183 	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
184 		if (ipc_mux->session[i].wwan)
185 			ipc_mux_session_reset(ipc_mux, i);
186 
187 	ipc_imem_channel_close(ipc_mux->imem, ipc_mux->channel_id);
188 
189 	/* Reset the MUX object. */
190 	ipc_mux->state = MUX_S_INACTIVE;
191 	ipc_mux->event = MUX_E_INACTIVE;
192 }
193 
/* CP has interrupted AP. If AP is in IP MUX mode, execute the pending ops.
 *
 * Drives the MUX state machine for the single event carried in @msg.
 * Returns the MUX channel id on success, -EAGAIN when the MUX layer is not
 * yet initialized, the negative result of ipc_mux_channel_create() when
 * channel setup failed, and -EIO for an invalid state/order combination or
 * a failed session open.
 */
static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg)
{
	enum mux_event order;
	bool success;
	int ret = -EIO; /* Default: invalid transition or failed operation. */

	if (!ipc_mux->initialized) {
		ret = -EAGAIN;
		goto out;
	}

	/* All message variants share the leading event field. */
	order = msg->common.event;

	switch (ipc_mux->state) {
	case MUX_S_INACTIVE:
		if (order != MUX_E_MUX_SESSION_OPEN)
			goto out; /* Wait for the request to open a session */

		if (ipc_mux->event == MUX_E_INACTIVE)
			/* Establish the MUX channel and the new state. */
			ipc_mux->channel_id = ipc_mux_channel_create(ipc_mux);

		/* ipc_mux_channel_create() sets MUX_S_ACTIVE on success. */
		if (ipc_mux->state != MUX_S_ACTIVE) {
			ret = ipc_mux->channel_id; /* Missing the MUX channel */
			goto out;
		}

		/* Disable the TD update timer and open the first IP session. */
		ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
		ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
		success = ipc_mux_session_open(ipc_mux, &msg->session_open);

		ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
		if (success)
			ret = ipc_mux->channel_id;
		goto out;

	case MUX_S_ACTIVE:
		switch (order) {
		case MUX_E_MUX_SESSION_OPEN:
			/* Disable the TD update timer and open a session */
			ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
			ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
			success = ipc_mux_session_open(ipc_mux,
						       &msg->session_open);
			ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
			if (success)
				ret = ipc_mux->channel_id;
			goto out;

		case MUX_E_MUX_SESSION_CLOSE:
			/* Release an IP session. */
			ipc_mux->event = MUX_E_MUX_SESSION_CLOSE;
			ipc_mux_session_close(ipc_mux, &msg->session_close);
			/* Closing the last session also closes the channel. */
			if (!ipc_mux->nr_sessions) {
				ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
				ipc_mux_channel_close(ipc_mux,
						      &msg->channel_close);
			}
			ret = ipc_mux->channel_id;
			goto out;

		case MUX_E_MUX_CHANNEL_CLOSE:
			/* Close the MUX channel pipes. */
			ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
			ipc_mux_channel_close(ipc_mux, &msg->channel_close);
			ret = ipc_mux->channel_id;
			goto out;

		default:
			/* Invalid order. */
			goto out;
		}

	default:
		dev_err(ipc_mux->dev,
			"unexpected MUX transition: state=%d, event=%d",
			ipc_mux->state, ipc_mux->event);
	}
out:
	return ret;
}
277 
/* Allocate and initialize the IP MUX object from the given configuration
 * and imem instance, including the pre-allocated list of UL ADB skbs.
 * Returns the new MUX object or NULL on allocation failure (the caller
 * owns the object and releases it with ipc_mux_deinit()).
 */
struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
			      struct iosm_imem *imem)
{
	struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
	int i, ul_tds, ul_td_size;
	struct sk_buff_head *free_list;
	struct sk_buff *skb;

	if (!ipc_mux)
		return NULL;

	/* Copy the MUX configuration parameters. */
	ipc_mux->protocol = mux_cfg->protocol;
	ipc_mux->ul_flow = mux_cfg->ul_flow;
	ipc_mux->instance_id = mux_cfg->instance_id;
	ipc_mux->wwan_q_offset = 0;

	/* Cache the back references into the imem instance. */
	ipc_mux->pcie = imem->pcie;
	ipc_mux->imem = imem;
	ipc_mux->ipc_protocol = imem->ipc_protocol;
	ipc_mux->dev = imem->dev;
	ipc_mux->wwan = imem->wwan;

	/* Get the reference to the UL ADB list. */
	free_list = &ipc_mux->ul_adb.free_list;

	/* Initialize the list with free ADB. */
	skb_queue_head_init(free_list);

	ul_td_size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;

	ul_tds = IPC_MEM_MAX_TDS_MUX_LITE_UL;

	ipc_mux->ul_adb.dest_skb = NULL;

	/* NOTE: initialized must be set before the allocation loop below;
	 * the error path calls ipc_mux_deinit(), which returns early unless
	 * this flag is true.
	 */
	ipc_mux->initialized = true;
	ipc_mux->adb_prep_ongoing = false;
	ipc_mux->size_needed = 0;
	ipc_mux->ul_data_pend_bytes = 0;
	ipc_mux->state = MUX_S_INACTIVE;
	ipc_mux->ev_mux_net_transmit_pending = false;
	ipc_mux->tx_transaction_id = 0;
	ipc_mux->rr_next_session = 0;
	ipc_mux->event = MUX_E_INACTIVE;
	ipc_mux->channel_id = -1;
	ipc_mux->channel = NULL;

	/* Allocate the list of UL ADB. */
	for (i = 0; i < ul_tds; i++) {
		dma_addr_t mapping;

		skb = ipc_pcie_alloc_skb(ipc_mux->pcie, ul_td_size, GFP_ATOMIC,
					 &mapping, DMA_TO_DEVICE, 0);
		if (!skb) {
			/* Frees the partially built ADB list and ipc_mux. */
			ipc_mux_deinit(ipc_mux);
			return NULL;
		}
		/* Extend the UL ADB list. */
		skb_queue_tail(free_list, skb);
	}

	return ipc_mux;
}
340 
341 /* Informs the network stack to restart transmission for all opened session if
342  * Flow Control is not ON for that session.
343  */
344 static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux)
345 {
346 	struct mux_session *session;
347 	int idx;
348 
349 	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
350 		session = &ipc_mux->session[idx];
351 
352 		if (!session->wwan)
353 			continue;
354 
355 		/* If flow control of the session is OFF and if there was tx
356 		 * stop then restart. Inform the network interface to restart
357 		 * sending data.
358 		 */
359 		if (session->flow_ctl_mask == 0) {
360 			session->net_tx_stop = false;
361 			ipc_mux_netif_tx_flowctrl(session, idx, false);
362 		}
363 	}
364 }
365 
366 /* Informs the network stack to stop sending further pkt for all opened
367  * sessions
368  */
369 static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux)
370 {
371 	struct mux_session *session;
372 	int idx;
373 
374 	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
375 		session = &ipc_mux->session[idx];
376 
377 		if (!session->wwan)
378 			continue;
379 
380 		ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
381 	}
382 }
383 
384 void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux)
385 {
386 	if (ipc_mux->ul_flow == MUX_UL) {
387 		int low_thresh = IPC_MEM_MUX_UL_FLOWCTRL_LOW_B;
388 
389 		if (ipc_mux->ul_data_pend_bytes < low_thresh)
390 			ipc_mux_restart_tx_for_all_sessions(ipc_mux);
391 	}
392 }
393 
394 int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux)
395 {
396 	return ipc_mux ? IPC_MEM_MUX_IP_SESSION_ENTRIES : -EFAULT;
397 }
398 
399 enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux)
400 {
401 	return ipc_mux ? ipc_mux->protocol : MUX_UNKNOWN;
402 }
403 
404 int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr)
405 {
406 	struct mux_session_open *session_open;
407 	union mux_msg mux_msg;
408 
409 	session_open = &mux_msg.session_open;
410 	session_open->event = MUX_E_MUX_SESSION_OPEN;
411 
412 	session_open->if_id = cpu_to_le32(session_nr);
413 	ipc_mux->session[session_nr].flags |= IPC_MEM_WWAN_MUX;
414 	return ipc_mux_schedule(ipc_mux, &mux_msg);
415 }
416 
417 int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr)
418 {
419 	struct mux_session_close *session_close;
420 	union mux_msg mux_msg;
421 	int ret_val;
422 
423 	session_close = &mux_msg.session_close;
424 	session_close->event = MUX_E_MUX_SESSION_CLOSE;
425 
426 	session_close->if_id = cpu_to_le32(session_nr);
427 	ret_val = ipc_mux_schedule(ipc_mux, &mux_msg);
428 	ipc_mux->session[session_nr].flags &= ~IPC_MEM_WWAN_MUX;
429 
430 	return ret_val;
431 }
432 
433 void ipc_mux_deinit(struct iosm_mux *ipc_mux)
434 {
435 	struct mux_channel_close *channel_close;
436 	struct sk_buff_head *free_list;
437 	union mux_msg mux_msg;
438 	struct sk_buff *skb;
439 
440 	if (!ipc_mux->initialized)
441 		return;
442 	ipc_mux_stop_netif_for_all_sessions(ipc_mux);
443 
444 	if (ipc_mux->state == MUX_S_ACTIVE) {
445 		channel_close = &mux_msg.channel_close;
446 		channel_close->event = MUX_E_MUX_CHANNEL_CLOSE;
447 		ipc_mux_schedule(ipc_mux, &mux_msg);
448 	}
449 
450 	/* Empty the ADB free list. */
451 	free_list = &ipc_mux->ul_adb.free_list;
452 
453 	/* Remove from the head of the downlink queue. */
454 	while ((skb = skb_dequeue(free_list)))
455 		ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
456 
457 	if (ipc_mux->channel) {
458 		ipc_mux->channel->ul_pipe.is_open = false;
459 		ipc_mux->channel->dl_pipe.is_open = false;
460 	}
461 
462 	kfree(ipc_mux);
463 }
464