xref: /linux/drivers/net/wwan/qcom_bam_dmux.c (revision 8c245fe7dde3bf776253550fc914a36293db4ff3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm BAM-DMUX WWAN network driver
4  * Copyright (c) 2020, Stephan Gerhold <stephan@gerhold.net>
5  */
6 
7 #include <linux/atomic.h>
8 #include <linux/bitops.h>
9 #include <linux/completion.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmaengine.h>
12 #include <linux/if_arp.h>
13 #include <linux/interrupt.h>
14 #include <linux/mod_devicetable.h>
15 #include <linux/module.h>
16 #include <linux/netdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/soc/qcom/smem_state.h>
20 #include <linux/spinlock.h>
21 #include <linux/wait.h>
22 #include <linux/workqueue.h>
23 #include <net/pkt_sched.h>
24 
25 #define BAM_DMUX_BUFFER_SIZE		SZ_2K
26 #define BAM_DMUX_HDR_SIZE		sizeof(struct bam_dmux_hdr)
27 #define BAM_DMUX_MAX_DATA_SIZE		(BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE)
28 #define BAM_DMUX_NUM_SKB		32
29 
30 #define BAM_DMUX_HDR_MAGIC		0x33fc
31 
32 #define BAM_DMUX_AUTOSUSPEND_DELAY	1000
33 #define BAM_DMUX_REMOTE_TIMEOUT		msecs_to_jiffies(2000)
34 
35 enum {
36 	BAM_DMUX_CMD_DATA,
37 	BAM_DMUX_CMD_OPEN,
38 	BAM_DMUX_CMD_CLOSE,
39 };
40 
41 enum {
42 	BAM_DMUX_CH_DATA_0,
43 	BAM_DMUX_CH_DATA_1,
44 	BAM_DMUX_CH_DATA_2,
45 	BAM_DMUX_CH_DATA_3,
46 	BAM_DMUX_CH_DATA_4,
47 	BAM_DMUX_CH_DATA_5,
48 	BAM_DMUX_CH_DATA_6,
49 	BAM_DMUX_CH_DATA_7,
50 	BAM_DMUX_NUM_CH
51 };
52 
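/*
 * Multiplexing header prepended to every message on the BAM pipe.
 * @magic must be BAM_DMUX_HDR_MAGIC, @cmd is one of BAM_DMUX_CMD_*,
 * @ch selects the logical channel, @len is the payload length without
 * header or padding, and @pad is the number of trailing bytes added to
 * word-align the message. @signal is zeroed on TX and not interpreted
 * by this driver.
 */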
53 struct bam_dmux_hdr {
54 	u16 magic;
55 	u8 signal;
56 	u8 cmd;
57 	u8 pad;
58 	u8 ch;
59 	u16 len;
60 };
61 
62 struct bam_dmux_skb_dma {
63 	struct bam_dmux *dmux;
64 	struct sk_buff *skb;
65 	dma_addr_t addr;
66 };
67 
68 struct bam_dmux {
69 	struct device *dev;
70 
71 	int pc_irq;
72 	bool pc_state, pc_ack_state;
73 	struct qcom_smem_state *pc, *pc_ack;
74 	u32 pc_mask, pc_ack_mask;
75 	wait_queue_head_t pc_wait;
76 	struct completion pc_ack_completion;
77 
78 	struct dma_chan *rx, *tx;
79 	struct bam_dmux_skb_dma rx_skbs[BAM_DMUX_NUM_SKB];
80 	struct bam_dmux_skb_dma tx_skbs[BAM_DMUX_NUM_SKB];
81 	spinlock_t tx_lock; /* Protect tx_skbs, tx_next_skb */
82 	unsigned int tx_next_skb;
83 	atomic_long_t tx_deferred_skb;
84 	struct work_struct tx_wakeup_work;
85 
86 	DECLARE_BITMAP(remote_channels, BAM_DMUX_NUM_CH);
87 	struct work_struct register_netdev_work;
88 	struct net_device *netdevs[BAM_DMUX_NUM_CH];
89 };
90 
91 struct bam_dmux_netdev {
92 	struct bam_dmux *dmux;
93 	u8 ch;
94 };
95 
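/*
 * Cast (or withdraw) our vote for the remote power state via the "pc"
 * SMEM state bit. The remote side confirms every vote change through the
 * "pc-ack" interrupt, which completes pc_ack_completion.
 */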
96 static void bam_dmux_pc_vote(struct bam_dmux *dmux, bool enable)
97 {
98 	reinit_completion(&dmux->pc_ack_completion);
99 	qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask,
100 				    enable ? dmux->pc_mask : 0);
101 }
102 
103 static void bam_dmux_pc_ack(struct bam_dmux *dmux)
104 {
105 	qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask,
106 				    dmux->pc_ack_state ? 0 : dmux->pc_ack_mask);
107 	dmux->pc_ack_state = !dmux->pc_ack_state;
108 }
109 
110 static bool bam_dmux_skb_dma_map(struct bam_dmux_skb_dma *skb_dma,
111 				 enum dma_data_direction dir)
112 {
113 	struct device *dev = skb_dma->dmux->dev;
114 
115 	skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir);
116 	if (dma_mapping_error(dev, skb_dma->addr)) {
117 		dev_err(dev, "Failed to DMA map buffer\n");
118 		skb_dma->addr = 0;
119 		return false;
120 	}
121 
122 	return true;
123 }
124 
125 static void bam_dmux_skb_dma_unmap(struct bam_dmux_skb_dma *skb_dma,
126 				   enum dma_data_direction dir)
127 {
128 	dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir);
129 	skb_dma->addr = 0;
130 }
131 
132 static void bam_dmux_tx_wake_queues(struct bam_dmux *dmux)
133 {
134 	int i;
135 
136 	dev_dbg(dmux->dev, "wake queues\n");
137 
138 	for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
139 		struct net_device *netdev = dmux->netdevs[i];
140 
141 		if (netdev && netif_running(netdev))
142 			netif_wake_queue(netdev);
143 	}
144 }
145 
146 static void bam_dmux_tx_stop_queues(struct bam_dmux *dmux)
147 {
148 	int i;
149 
150 	dev_dbg(dmux->dev, "stop queues\n");
151 
152 	for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
153 		struct net_device *netdev = dmux->netdevs[i];
154 
155 		if (netdev)
156 			netif_stop_queue(netdev);
157 	}
158 }
159 
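/*
 * Common TX cleanup path: drop the runtime PM reference taken for this
 * transfer, unmap the buffer and release the ring slot, waking the
 * netdev queues if this slot was the one blocking further transmission.
 */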
160 static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma)
161 {
162 	struct bam_dmux *dmux = skb_dma->dmux;
163 	unsigned long flags;
164 
165 	pm_runtime_mark_last_busy(dmux->dev);
166 	pm_runtime_put_autosuspend(dmux->dev);
167 
168 	if (skb_dma->addr)
169 		bam_dmux_skb_dma_unmap(skb_dma, DMA_TO_DEVICE);
170 
171 	spin_lock_irqsave(&dmux->tx_lock, flags);
172 	skb_dma->skb = NULL;
173 	if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB])
174 		bam_dmux_tx_wake_queues(dmux);
175 	spin_unlock_irqrestore(&dmux->tx_lock, flags);
176 }
177 
178 static void bam_dmux_tx_callback(void *data)
179 {
180 	struct bam_dmux_skb_dma *skb_dma = data;
181 	struct sk_buff *skb = skb_dma->skb;
182 
183 	bam_dmux_tx_done(skb_dma);
184 	dev_consume_skb_any(skb);
185 }
186 
187 static bool bam_dmux_skb_dma_submit_tx(struct bam_dmux_skb_dma *skb_dma)
188 {
189 	struct bam_dmux *dmux = skb_dma->dmux;
190 	struct dma_async_tx_descriptor *desc;
191 
192 	desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr,
193 					   skb_dma->skb->len, DMA_MEM_TO_DEV,
194 					   DMA_PREP_INTERRUPT);
195 	if (!desc) {
196 		dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n");
197 		return false;
198 	}
199 
200 	desc->callback = bam_dmux_tx_callback;
201 	desc->callback_param = skb_dma;
202 	desc->cookie = dmaengine_submit(desc);
203 	return true;
204 }
205 
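/*
 * Reserve the next free slot in the TX ring for @skb. Returns NULL (and
 * stops the netdev queues) when the ring is full; the queues are also
 * stopped pre-emptively when the slot after this one is still busy.
 */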
206 static struct bam_dmux_skb_dma *
207 bam_dmux_tx_queue(struct bam_dmux *dmux, struct sk_buff *skb)
208 {
209 	struct bam_dmux_skb_dma *skb_dma;
210 	unsigned long flags;
211 
212 	spin_lock_irqsave(&dmux->tx_lock, flags);
213 
214 	skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB];
215 	if (skb_dma->skb) {
216 		bam_dmux_tx_stop_queues(dmux);
217 		spin_unlock_irqrestore(&dmux->tx_lock, flags);
218 		return NULL;
219 	}
220 	skb_dma->skb = skb;
221 
222 	dmux->tx_next_skb++;
223 	if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb)
224 		bam_dmux_tx_stop_queues(dmux);
225 
226 	spin_unlock_irqrestore(&dmux->tx_lock, flags);
227 	return skb_dma;
228 }
229 
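/*
 * Send a header-only control command (OPEN/CLOSE) for the channel of
 * @bndev. May sleep: the skb is allocated with GFP_KERNEL and the device
 * is resumed synchronously.
 */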
230 static int bam_dmux_send_cmd(struct bam_dmux_netdev *bndev, u8 cmd)
231 {
232 	struct bam_dmux *dmux = bndev->dmux;
233 	struct bam_dmux_skb_dma *skb_dma;
234 	struct bam_dmux_hdr *hdr;
235 	struct sk_buff *skb;
236 	int ret;
237 
238 	skb = alloc_skb(sizeof(*hdr), GFP_KERNEL);
239 	if (!skb)
240 		return -ENOMEM;
241 
242 	hdr = skb_put_zero(skb, sizeof(*hdr));
243 	hdr->magic = BAM_DMUX_HDR_MAGIC;
244 	hdr->cmd = cmd;
245 	hdr->ch = bndev->ch;
246 
247 	skb_dma = bam_dmux_tx_queue(dmux, skb);
248 	if (!skb_dma) {
249 		ret = -EAGAIN;
250 		goto free_skb;
251 	}
252 
253 	ret = pm_runtime_get_sync(dmux->dev);
254 	if (ret < 0)
255 		goto tx_fail;
256 
257 	if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) {
258 		ret = -ENOMEM;
259 		goto tx_fail;
260 	}
261 
262 	if (!bam_dmux_skb_dma_submit_tx(skb_dma)) {
263 		ret = -EIO;
264 		goto tx_fail;
265 	}
266 
267 	dma_async_issue_pending(dmux->tx);
268 	return 0;
269 
270 tx_fail:
271 	bam_dmux_tx_done(skb_dma);
272 free_skb:
273 	dev_kfree_skb(skb);
274 	return ret;
275 }
276 
277 static int bam_dmux_netdev_open(struct net_device *netdev)
278 {
279 	struct bam_dmux_netdev *bndev = netdev_priv(netdev);
280 	int ret;
281 
282 	ret = bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_OPEN);
283 	if (ret)
284 		return ret;
285 
286 	netif_start_queue(netdev);
287 	return 0;
288 }
289 
290 static int bam_dmux_netdev_stop(struct net_device *netdev)
291 {
292 	struct bam_dmux_netdev *bndev = netdev_priv(netdev);
293 
294 	netif_stop_queue(netdev);
295 	bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_CLOSE);
296 	return 0;
297 }
298 
299 static unsigned int needed_room(unsigned int avail, unsigned int needed)
300 {
301 	if (avail >= needed)
302 		return 0;
303 	return needed - avail;
304 }
305 
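/*
 * Prepend the BAM-DMUX header and append word-alignment padding to @skb,
 * expanding the head/tail or copying a cloned skb first if necessary.
 */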
306 static int bam_dmux_tx_prepare_skb(struct bam_dmux_netdev *bndev,
307 				   struct sk_buff *skb)
308 {
309 	unsigned int head = needed_room(skb_headroom(skb), BAM_DMUX_HDR_SIZE);
310 	unsigned int pad = sizeof(u32) - skb->len % sizeof(u32);
311 	unsigned int tail = needed_room(skb_tailroom(skb), pad);
312 	struct bam_dmux_hdr *hdr;
313 	int ret;
314 
315 	if (head || tail || skb_cloned(skb)) {
316 		ret = pskb_expand_head(skb, head, tail, GFP_ATOMIC);
317 		if (ret)
318 			return ret;
319 	}
320 
321 	hdr = skb_push(skb, sizeof(*hdr));
322 	hdr->magic = BAM_DMUX_HDR_MAGIC;
323 	hdr->signal = 0;
324 	hdr->cmd = BAM_DMUX_CMD_DATA;
325 	hdr->pad = pad;
326 	hdr->ch = bndev->ch;
327 	hdr->len = skb->len - sizeof(*hdr);
328 	if (pad)
329 		skb_put_zero(skb, pad);
330 
331 	return 0;
332 }
333 
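/*
 * ndo_start_xmit: reserve a TX ring slot and submit the skb to the DMA
 * engine. If the device is not runtime-active yet, the slot is recorded
 * in tx_deferred_skb and submission is finished later by
 * bam_dmux_tx_wakeup_work(), because this context cannot sleep.
 */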
334 static netdev_tx_t bam_dmux_netdev_start_xmit(struct sk_buff *skb,
335 					      struct net_device *netdev)
336 {
337 	struct bam_dmux_netdev *bndev = netdev_priv(netdev);
338 	struct bam_dmux *dmux = bndev->dmux;
339 	struct bam_dmux_skb_dma *skb_dma;
340 	int active, ret;
341 
342 	skb_dma = bam_dmux_tx_queue(dmux, skb);
343 	if (!skb_dma)
344 		return NETDEV_TX_BUSY;
345 
346 	active = pm_runtime_get(dmux->dev);
347 	if (active < 0 && active != -EINPROGRESS)
348 		goto drop;
349 
350 	ret = bam_dmux_tx_prepare_skb(bndev, skb);
351 	if (ret)
352 		goto drop;
353 
354 	if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE))
355 		goto drop;
356 
357 	if (active <= 0) {
358 		/* Cannot sleep here so mark skb for wakeup handler and return */
359 		if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs),
360 					  &dmux->tx_deferred_skb))
361 			queue_pm_work(&dmux->tx_wakeup_work);
362 		return NETDEV_TX_OK;
363 	}
364 
365 	if (!bam_dmux_skb_dma_submit_tx(skb_dma))
366 		goto drop;
367 
368 	dma_async_issue_pending(dmux->tx);
369 	return NETDEV_TX_OK;
370 
371 drop:
372 	bam_dmux_tx_done(skb_dma);
373 	dev_kfree_skb_any(skb);
374 	return NETDEV_TX_OK;
375 }
376 
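/*
 * Worker that resumes the device (sleeping is allowed here) and submits
 * any TX slots that were deferred by the xmit path while the device was
 * runtime suspended.
 */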
377 static void bam_dmux_tx_wakeup_work(struct work_struct *work)
378 {
379 	struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work);
380 	unsigned long pending;
381 	int ret, i;
382 
383 	ret = pm_runtime_resume_and_get(dmux->dev);
384 	if (ret < 0) {
385 		dev_err(dmux->dev, "Failed to resume: %d\n", ret);
386 		return;
387 	}
388 
389 	pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0);
390 	if (!pending)
391 		goto out;
392 
393 	dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending);
394 	for_each_set_bit(i, &pending, BAM_DMUX_NUM_SKB) {
395 		bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]);
396 	}
397 	dma_async_issue_pending(dmux->tx);
398 
399 out:
400 	pm_runtime_mark_last_busy(dmux->dev);
401 	pm_runtime_put_autosuspend(dmux->dev);
402 }
403 
404 static const struct net_device_ops bam_dmux_ops = {
405 	.ndo_open	= bam_dmux_netdev_open,
406 	.ndo_stop	= bam_dmux_netdev_stop,
407 	.ndo_start_xmit	= bam_dmux_netdev_start_xmit,
408 };
409 
410 static const struct device_type wwan_type = {
411 	.name = "wwan",
412 };
413 
414 static void bam_dmux_netdev_setup(struct net_device *dev)
415 {
416 	dev->netdev_ops = &bam_dmux_ops;
417 
418 	dev->type = ARPHRD_RAWIP;
419 	SET_NETDEV_DEVTYPE(dev, &wwan_type);
420 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
421 
422 	dev->mtu = ETH_DATA_LEN;
423 	dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE;
424 	dev->needed_headroom = sizeof(struct bam_dmux_hdr);
425 	dev->needed_tailroom = sizeof(u32); /* word-aligned */
426 	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
427 
428 	/* This perm addr will be used as interface identifier by IPv6 */
429 	dev->addr_assign_type = NET_ADDR_RANDOM;
430 	eth_random_addr(dev->perm_addr);
431 }
432 
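/*
 * Worker that allocates and registers a wwan%d netdev for every remote
 * channel that is open but has no netdev yet. Runs from process context
 * because register_netdev() may sleep.
 */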
433 static void bam_dmux_register_netdev_work(struct work_struct *work)
434 {
435 	struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work);
436 	struct bam_dmux_netdev *bndev;
437 	struct net_device *netdev;
438 	int ch, ret;
439 
440 	for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) {
441 		if (dmux->netdevs[ch])
442 			continue;
443 
444 		netdev = alloc_netdev(sizeof(*bndev), "wwan%d", NET_NAME_ENUM,
445 				      bam_dmux_netdev_setup);
446 		if (!netdev)
447 			return;
448 
449 		SET_NETDEV_DEV(netdev, dmux->dev);
450 		netdev->dev_port = ch;
451 
452 		bndev = netdev_priv(netdev);
453 		bndev->dmux = dmux;
454 		bndev->ch = ch;
455 
456 		ret = register_netdev(netdev);
457 		if (ret) {
458 			dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n",
459 				ch, ret);
460 			free_netdev(netdev);
461 			return;
462 		}
463 
464 		dmux->netdevs[ch] = netdev;
465 	}
466 }
467 
468 static void bam_dmux_rx_callback(void *data);
469 
470 static bool bam_dmux_skb_dma_submit_rx(struct bam_dmux_skb_dma *skb_dma)
471 {
472 	struct bam_dmux *dmux = skb_dma->dmux;
473 	struct dma_async_tx_descriptor *desc;
474 
475 	desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr,
476 					   skb_dma->skb->len, DMA_DEV_TO_MEM,
477 					   DMA_PREP_INTERRUPT);
478 	if (!desc) {
479 		dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n");
480 		return false;
481 	}
482 
483 	desc->callback = bam_dmux_rx_callback;
484 	desc->callback_param = skb_dma;
485 	desc->cookie = dmaengine_submit(desc);
486 	return true;
487 }
488 
489 static bool bam_dmux_skb_dma_queue_rx(struct bam_dmux_skb_dma *skb_dma, gfp_t gfp)
490 {
491 	if (!skb_dma->skb) {
492 		skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp);
493 		if (!skb_dma->skb)
494 			return false;
495 		skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE);
496 	}
497 
498 	return bam_dmux_skb_dma_map(skb_dma, DMA_FROM_DEVICE) &&
499 	       bam_dmux_skb_dma_submit_rx(skb_dma);
500 }
501 
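/*
 * Handle a received DATA message: strip the header and padding and hand
 * the payload to the network stack on the channel's netdev. On success
 * the buffer pointer is cleared so the caller refills the RX slot with a
 * fresh skb.
 */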
502 static void bam_dmux_cmd_data(struct bam_dmux_skb_dma *skb_dma)
503 {
504 	struct bam_dmux *dmux = skb_dma->dmux;
505 	struct sk_buff *skb = skb_dma->skb;
506 	struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
507 	struct net_device *netdev = dmux->netdevs[hdr->ch];
508 
509 	if (!netdev || !netif_running(netdev)) {
510 		dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch);
511 		return;
512 	}
513 
514 	if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) {
515 		dev_err(dmux->dev, "Data larger than buffer? (%u > %u)\n",
516 			hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE);
517 		return;
518 	}
519 
520 	skb_dma->skb = NULL; /* Hand over to network stack */
521 
522 	skb_pull(skb, sizeof(*hdr));
523 	skb_trim(skb, hdr->len);
524 	skb->dev = netdev;
525 
526 	/* Only Raw-IP/QMAP is supported by this driver */
527 	switch (skb->data[0] & 0xf0) {
528 	case 0x40:
529 		skb->protocol = htons(ETH_P_IP);
530 		break;
531 	case 0x60:
532 		skb->protocol = htons(ETH_P_IPV6);
533 		break;
534 	default:
535 		skb->protocol = htons(ETH_P_MAP);
536 		break;
537 	}
538 
539 	netif_receive_skb(skb);
540 }
541 
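/*
 * OPEN/CLOSE control commands from the remote side mark a channel as
 * usable (attaching or registering its netdev) or unusable (detaching
 * its netdev).
 */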
542 static void bam_dmux_cmd_open(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
543 {
544 	struct net_device *netdev = dmux->netdevs[hdr->ch];
545 
546 	dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch);
547 
548 	if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) {
549 		dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch);
550 		return;
551 	}
552 
553 	if (netdev) {
554 		netif_device_attach(netdev);
555 	} else {
556 		/* Cannot sleep here, schedule work to register the netdev */
557 		schedule_work(&dmux->register_netdev_work);
558 	}
559 }
560 
561 static void bam_dmux_cmd_close(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
562 {
563 	struct net_device *netdev = dmux->netdevs[hdr->ch];
564 
565 	dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch);
566 
567 	if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) {
568 		dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch);
569 		return;
570 	}
571 
572 	if (netdev)
573 		netif_device_detach(netdev);
574 }
575 
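/*
 * DMA completion callback for received buffers: validate the header,
 * dispatch the command and re-queue the RX slot so the ring stays full.
 */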
576 static void bam_dmux_rx_callback(void *data)
577 {
578 	struct bam_dmux_skb_dma *skb_dma = data;
579 	struct bam_dmux *dmux = skb_dma->dmux;
580 	struct sk_buff *skb = skb_dma->skb;
581 	struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
582 
583 	bam_dmux_skb_dma_unmap(skb_dma, DMA_FROM_DEVICE);
584 
585 	if (hdr->magic != BAM_DMUX_HDR_MAGIC) {
586 		dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic);
587 		goto out;
588 	}
589 
590 	if (hdr->ch >= BAM_DMUX_NUM_CH) {
591 		dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch);
592 		goto out;
593 	}
594 
595 	switch (hdr->cmd) {
596 	case BAM_DMUX_CMD_DATA:
597 		bam_dmux_cmd_data(skb_dma);
598 		break;
599 	case BAM_DMUX_CMD_OPEN:
600 		bam_dmux_cmd_open(dmux, hdr);
601 		break;
602 	case BAM_DMUX_CMD_CLOSE:
603 		bam_dmux_cmd_close(dmux, hdr);
604 		break;
605 	default:
606 		dev_err(dmux->dev, "Unsupported command %u on channel %u\n",
607 			hdr->cmd, hdr->ch);
608 		break;
609 	}
610 
611 out:
612 	if (bam_dmux_skb_dma_queue_rx(skb_dma, GFP_ATOMIC))
613 		dma_async_issue_pending(dmux->rx);
614 }
615 
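/*
 * Bring up the RX side once the remote signals power-on: request the RX
 * DMA channel and fill the RX ring. The TX channel is requested lazily
 * in bam_dmux_runtime_resume().
 */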
616 static bool bam_dmux_power_on(struct bam_dmux *dmux)
617 {
618 	struct device *dev = dmux->dev;
619 	struct dma_slave_config dma_rx_conf = {
620 		.direction = DMA_DEV_TO_MEM,
621 		.src_maxburst = BAM_DMUX_BUFFER_SIZE,
622 	};
623 	int i;
624 
625 	dmux->rx = dma_request_chan(dev, "rx");
626 	if (IS_ERR(dmux->rx)) {
627 		dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx);
628 		dmux->rx = NULL;
629 		return false;
630 	}
631 	dmaengine_slave_config(dmux->rx, &dma_rx_conf);
632 
633 	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
634 		if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL))
635 			return false;
636 	}
637 	dma_async_issue_pending(dmux->rx);
638 
639 	return true;
640 }
641 
642 static void bam_dmux_free_skbs(struct bam_dmux_skb_dma skbs[],
643 			       enum dma_data_direction dir)
644 {
645 	int i;
646 
647 	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
648 		struct bam_dmux_skb_dma *skb_dma = &skbs[i];
649 
650 		if (skb_dma->addr)
651 			bam_dmux_skb_dma_unmap(skb_dma, dir);
652 		if (skb_dma->skb) {
653 			dev_kfree_skb(skb_dma->skb);
654 			skb_dma->skb = NULL;
655 		}
656 	}
657 }
658 
659 static void bam_dmux_power_off(struct bam_dmux *dmux)
660 {
661 	if (dmux->tx) {
662 		dmaengine_terminate_sync(dmux->tx);
663 		dma_release_channel(dmux->tx);
664 		dmux->tx = NULL;
665 	}
666 
667 	if (dmux->rx) {
668 		dmaengine_terminate_sync(dmux->rx);
669 		dma_release_channel(dmux->rx);
670 		dmux->rx = NULL;
671 	}
672 
673 	bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);
674 }
675 
676 static irqreturn_t bam_dmux_pc_irq(int irq, void *data)
677 {
678 	struct bam_dmux *dmux = data;
679 	bool new_state = !dmux->pc_state;
680 
681 	dev_dbg(dmux->dev, "pc: %u\n", new_state);
682 
683 	if (new_state) {
684 		if (bam_dmux_power_on(dmux))
685 			bam_dmux_pc_ack(dmux);
686 		else
687 			bam_dmux_power_off(dmux);
688 	} else {
689 		bam_dmux_power_off(dmux);
690 		bam_dmux_pc_ack(dmux);
691 	}
692 
693 	dmux->pc_state = new_state;
694 	wake_up_all(&dmux->pc_wait);
695 
696 	return IRQ_HANDLED;
697 }
698 
699 static irqreturn_t bam_dmux_pc_ack_irq(int irq, void *data)
700 {
701 	struct bam_dmux *dmux = data;
702 
703 	dev_dbg(dmux->dev, "pc ack\n");
704 	complete_all(&dmux->pc_ack_completion);
705 
706 	return IRQ_HANDLED;
707 }
708 
709 static int bam_dmux_runtime_suspend(struct device *dev)
710 {
711 	struct bam_dmux *dmux = dev_get_drvdata(dev);
712 
713 	dev_dbg(dev, "runtime suspend\n");
714 	bam_dmux_pc_vote(dmux, false);
715 
716 	return 0;
717 }
718 
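/*
 * Runtime resume: wait for the previous power-down to be acked, vote for
 * the remote power state, wait for the ack and the resulting "pc" state
 * change, then verify that the RX side came up and request the TX DMA
 * channel if it is not already held.
 */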
719 static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
720 {
721 	struct bam_dmux *dmux = dev_get_drvdata(dev);
722 
723 	dev_dbg(dev, "runtime resume\n");
724 
725 	/* Wait until previous power down was acked */
726 	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
727 					 BAM_DMUX_REMOTE_TIMEOUT))
728 		return -ETIMEDOUT;
729 
730 	/* Vote for power state */
731 	bam_dmux_pc_vote(dmux, true);
732 
733 	/* Wait for ack */
734 	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
735 					 BAM_DMUX_REMOTE_TIMEOUT)) {
736 		bam_dmux_pc_vote(dmux, false);
737 		return -ETIMEDOUT;
738 	}
739 
740 	/* Wait until we're up */
741 	if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
742 				BAM_DMUX_REMOTE_TIMEOUT)) {
743 		bam_dmux_pc_vote(dmux, false);
744 		return -ETIMEDOUT;
745 	}
746 
747 	/* Ensure that we actually initialized successfully */
748 	if (!dmux->rx) {
749 		bam_dmux_pc_vote(dmux, false);
750 		return -ENXIO;
751 	}
752 
753 	/* Request TX channel if necessary */
754 	if (dmux->tx)
755 		return 0;
756 
757 	dmux->tx = dma_request_chan(dev, "tx");
758 	if (IS_ERR(dmux->tx)) {
759 		dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
760 		dmux->tx = NULL;
761 		bam_dmux_runtime_suspend(dev);
762 		return -ENXIO;
763 	}
764 
765 	return 0;
766 }
767 
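/*
 * Probe: look up the "pc"/"pc-ack" interrupts and SMEM state bits, set up
 * runtime PM with autosuspend, install the interrupt handlers and handle
 * the case where the remote side finished initialization before us.
 */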
768 static int bam_dmux_probe(struct platform_device *pdev)
769 {
770 	struct device *dev = &pdev->dev;
771 	struct bam_dmux *dmux;
772 	int ret, pc_ack_irq, i;
773 	unsigned int bit;
774 
775 	dmux = devm_kzalloc(dev, sizeof(*dmux), GFP_KERNEL);
776 	if (!dmux)
777 		return -ENOMEM;
778 
779 	dmux->dev = dev;
780 	platform_set_drvdata(pdev, dmux);
781 
782 	dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
783 	if (dmux->pc_irq < 0)
784 		return dmux->pc_irq;
785 
786 	pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");
787 	if (pc_ack_irq < 0)
788 		return pc_ack_irq;
789 
790 	dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
791 	if (IS_ERR(dmux->pc))
792 		return dev_err_probe(dev, PTR_ERR(dmux->pc),
793 				     "Failed to get pc state\n");
794 	dmux->pc_mask = BIT(bit);
795 
796 	dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
797 	if (IS_ERR(dmux->pc_ack))
798 		return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
799 				     "Failed to get pc-ack state\n");
800 	dmux->pc_ack_mask = BIT(bit);
801 
802 	init_waitqueue_head(&dmux->pc_wait);
803 	init_completion(&dmux->pc_ack_completion);
804 	complete_all(&dmux->pc_ack_completion);
805 
806 	spin_lock_init(&dmux->tx_lock);
807 	INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
808 	INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);
809 
810 	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
811 		dmux->rx_skbs[i].dmux = dmux;
812 		dmux->tx_skbs[i].dmux = dmux;
813 	}
814 
815 	/* Runtime PM manages our own power vote.
816 	 * Note that the RX path may be active even if we are runtime suspended,
817 	 * since it is controlled by the remote side.
818 	 */
819 	pm_runtime_set_autosuspend_delay(dev, BAM_DMUX_AUTOSUSPEND_DELAY);
820 	pm_runtime_use_autosuspend(dev);
821 	pm_runtime_enable(dev);
822 
823 	ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
824 					IRQF_ONESHOT, NULL, dmux);
825 	if (ret)
826 		goto err_disable_pm;
827 
828 	ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
829 					IRQF_ONESHOT, NULL, dmux);
830 	if (ret)
831 		goto err_disable_pm;
832 
833 	ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
834 				    &dmux->pc_state);
835 	if (ret)
836 		goto err_disable_pm;
837 
838 	/* Check if remote finished initialization before us */
839 	if (dmux->pc_state) {
840 		if (bam_dmux_power_on(dmux))
841 			bam_dmux_pc_ack(dmux);
842 		else
843 			bam_dmux_power_off(dmux);
844 	}
845 
846 	return 0;
847 
848 err_disable_pm:
849 	pm_runtime_disable(dev);
850 	pm_runtime_dont_use_autosuspend(dev);
851 	return ret;
852 }
853 
854 static void bam_dmux_remove(struct platform_device *pdev)
855 {
856 	struct bam_dmux *dmux = platform_get_drvdata(pdev);
857 	struct device *dev = dmux->dev;
858 	LIST_HEAD(list);
859 	int i;
860 
861 	/* Unregister network interfaces */
862 	cancel_work_sync(&dmux->register_netdev_work);
863 	rtnl_lock();
864 	for (i = 0; i < BAM_DMUX_NUM_CH; ++i)
865 		if (dmux->netdevs[i])
866 			unregister_netdevice_queue(dmux->netdevs[i], &list);
867 	unregister_netdevice_many(&list);
868 	rtnl_unlock();
869 	cancel_work_sync(&dmux->tx_wakeup_work);
870 
871 	/* Drop our own power vote */
872 	pm_runtime_disable(dev);
873 	pm_runtime_dont_use_autosuspend(dev);
874 	bam_dmux_runtime_suspend(dev);
875 	pm_runtime_set_suspended(dev);
876 
877 	/* Try to wait for remote side to drop power vote */
878 	if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT))
879 		dev_err(dev, "Timed out waiting for remote side to suspend\n");
880 
881 	/* Make sure everything is cleaned up before we return */
882 	disable_irq(dmux->pc_irq);
883 	bam_dmux_power_off(dmux);
884 	bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);
885 }
886 
887 static const struct dev_pm_ops bam_dmux_pm_ops = {
888 	SET_RUNTIME_PM_OPS(bam_dmux_runtime_suspend, bam_dmux_runtime_resume, NULL)
889 };
890 
891 static const struct of_device_id bam_dmux_of_match[] = {
892 	{ .compatible = "qcom,bam-dmux" },
893 	{ /* sentinel */ }
894 };
895 MODULE_DEVICE_TABLE(of, bam_dmux_of_match);
896 
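/*
 * Illustrative only: a minimal devicetree node sketch, inferred from the
 * resource names this driver requests ("pc"/"pc-ack" interrupts and SMEM
 * state bits, "rx"/"tx" DMA channels). The authoritative format is the
 * qcom,bam-dmux devicetree binding; the phandles and cell values below
 * are placeholders.
 *
 *	wwan {
 *		compatible = "qcom,bam-dmux";
 *		interrupts-extended = <&modem_smsm 1 IRQ_TYPE_EDGE_BOTH>,
 *				      <&modem_smsm 11 IRQ_TYPE_EDGE_BOTH>;
 *		interrupt-names = "pc", "pc-ack";
 *		qcom,smem-states = <&apps_smsm 1>, <&apps_smsm 11>;
 *		qcom,smem-state-names = "pc", "pc-ack";
 *		dmas = <&bam_dmux_dma 4>, <&bam_dmux_dma 5>;
 *		dma-names = "tx", "rx";
 *	};
 */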
897 static struct platform_driver bam_dmux_driver = {
898 	.probe = bam_dmux_probe,
899 	.remove_new = bam_dmux_remove,
900 	.driver = {
901 		.name = "bam-dmux",
902 		.pm = &bam_dmux_pm_ops,
903 		.of_match_table = bam_dmux_of_match,
904 	},
905 };
906 module_platform_driver(bam_dmux_driver);
907 
908 MODULE_LICENSE("GPL v2");
909 MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver");
910 MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
911