// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI MBIM Network driver - Network/MBIM over MHI bus
 *
 * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org>
 *
 * This driver copies some code from cdc_ncm, which is:
 * Copyright (C) ST-Ericsson 2010-2012
 * and cdc_mbim, which is:
 * Copyright (c) 2012 Smith Micro Software, Inc.
 * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
 *
 */

#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/mhi.h>
#include <linux/mii.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc_ncm.h>
#include <linux/wwan.h>

/* An MRU of 3500 bytes allows optimized skb allocation: the skbs will
 * basically fit in one 4K page. Large MBIM packets will simply be split
 * over several MHI transfers and chained by the MHI net layer (zerocopy).
 */
#define MHI_DEFAULT_MRU 3500

#define MHI_MBIM_DEFAULT_MTU 1500
#define MHI_MAX_BUF_SZ 0xffff

#define MBIM_NDP16_SIGN_MASK 0x00ffffff

#define MHI_MBIM_LINK_HASH_SIZE 8
#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE)
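/* For example, LINK_HASH(0) == 0 and LINK_HASH(9) == 1: session IDs are
 * typically small consecutive integers, so a simple modulo spreads the
 * links evenly across the 8 hash buckets.
 */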

#define WDS_BIND_MUX_DATA_PORT_MUX_ID 112

struct mhi_mbim_link {
        struct mhi_mbim_context *mbim;
        struct net_device *ndev;
        unsigned int session;

        /* stats */
        u64_stats_t rx_packets;
        u64_stats_t rx_bytes;
        u64_stats_t rx_errors;
        u64_stats_t tx_packets;
        u64_stats_t tx_bytes;
        u64_stats_t tx_errors;
        u64_stats_t tx_dropped;
        struct u64_stats_sync tx_syncp;
        struct u64_stats_sync rx_syncp;

        struct hlist_node hlnode;
};

struct mhi_mbim_context {
        struct mhi_device *mdev;
        struct sk_buff *skbagg_head;
        struct sk_buff *skbagg_tail;
        unsigned int mru;
        u32 rx_queue_sz;
        u16 rx_seq;
        u16 tx_seq;
        struct delayed_work rx_refill;
        spinlock_t tx_lock;
        struct hlist_head link_list[MHI_MBIM_LINK_HASH_SIZE];
};

struct mbim_tx_hdr {
        struct usb_cdc_ncm_nth16 nth16;
        struct usb_cdc_ncm_ndp16 ndp16;
        struct usb_cdc_ncm_dpe16 dpe16[2];
} __packed;
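/* For illustration: with the usual packed CDC sizes (12-byte NTH16, 8-byte
 * NDP16 header, 4-byte DPE16 entries), a single-datagram TX frame built
 * from this header is laid out as:
 *
 *   offset  0: NTH16 (wNdpIndex = 12)
 *   offset 12: NDP16 (wLength = 16: header + two DPE16 entries)
 *   offset 20: DPE16[0] = { wDatagramIndex = 28, wDatagramLength = IP len }
 *   offset 24: DPE16[1] = { 0, 0 } (null terminator)
 *   offset 28: IP datagram
 */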

static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
                                                   unsigned int session)
{
        struct mhi_mbim_link *link;

        hlist_for_each_entry_rcu(link, &mbim->link_list[LINK_HASH(session)], hlnode) {
                if (link->session == session)
                        return link;
        }

        return NULL;
}

static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
{
        if (strcmp(cntrl->name, "foxconn-dw5934e") == 0 ||
            strcmp(cntrl->name, "foxconn-t99w515") == 0)
                return WDS_BIND_MUX_DATA_PORT_MUX_ID;

        return 0;
}

static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
                                     u16 tx_seq)
{
        unsigned int dgram_size = skb->len;
        struct usb_cdc_ncm_nth16 *nth16;
        struct usb_cdc_ncm_ndp16 *ndp16;
        struct mbim_tx_hdr *mbim_hdr;

        /* Only one NDP is sent, containing the IP packet (no aggregation) */

        /* Ensure we have enough headroom for crafting MBIM header */
        if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));

        /* Fill NTB header */
        nth16 = &mbim_hdr->nth16;
        nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
        nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
        nth16->wSequence = cpu_to_le16(tx_seq);
        nth16->wBlockLength = cpu_to_le16(skb->len);
        nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));

        /* Fill the unique NDP */
        ndp16 = &mbim_hdr->ndp16;
        ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
        ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
                                     + sizeof(struct usb_cdc_ncm_dpe16) * 2);
        ndp16->wNextNdpIndex = 0;

        /* Datagram follows the mbim header */
        ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
        ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);

        /* null termination */
        ndp16->dpe16[1].wDatagramIndex = 0;
        ndp16->dpe16[1].wDatagramLength = 0;

        return skb;
}

static netdev_tx_t mhi_mbim_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
        struct mhi_mbim_context *mbim = link->mbim;
        unsigned long flags;
        int err = -ENOMEM;

        /* Serialize MHI channel queuing and MBIM seq */
        spin_lock_irqsave(&mbim->tx_lock, flags);

        skb = mbim_tx_fixup(skb, link->session, mbim->tx_seq);
        if (unlikely(!skb))
                goto exit_unlock;

        err = mhi_queue_skb(mbim->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);

        if (mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
                netif_stop_queue(ndev);

        if (!err)
                mbim->tx_seq++;

exit_unlock:
        spin_unlock_irqrestore(&mbim->tx_lock, flags);

        if (unlikely(err)) {
                net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
                                    ndev->name, err);
                dev_kfree_skb_any(skb);
                goto exit_drop;
        }

        return NETDEV_TX_OK;

exit_drop:
        u64_stats_update_begin(&link->tx_syncp);
        u64_stats_inc(&link->tx_dropped);
        u64_stats_update_end(&link->tx_syncp);

        return NETDEV_TX_OK;
}

static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *skb)
{
        struct usb_cdc_ncm_nth16 *nth16;
        int len;

        if (skb->len < sizeof(struct usb_cdc_ncm_nth16) +
                       sizeof(struct usb_cdc_ncm_ndp16)) {
                net_err_ratelimited("frame too short\n");
                return -EINVAL;
        }

        nth16 = (struct usb_cdc_ncm_nth16 *)skb->data;

        if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
                net_err_ratelimited("invalid NTH16 signature <%#010x>\n",
                                    le32_to_cpu(nth16->dwSignature));
                return -EINVAL;
        }

        /* No limit on the block length, except the size of the data pkt */
        len = le16_to_cpu(nth16->wBlockLength);
        if (len > skb->len) {
                net_err_ratelimited("NTB does not fit into the skb %u/%u\n",
                                    len, skb->len);
                return -EINVAL;
        }

        if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
            (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
            !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
                net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
                                    mbim->rx_seq, le16_to_cpu(nth16->wSequence));
        }
        mbim->rx_seq = le16_to_cpu(nth16->wSequence);

        return le16_to_cpu(nth16->wNdpIndex);
}
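/* Note on the sequence check above: only two discontinuities are expected,
 * a reset (prev == 0 and curr == 0) and the 16-bit rollover (prev == 0xffff,
 * curr == 0). Anything else (e.g. prev = 5, curr = 7) is logged as a glitch,
 * but the NTB is still processed.
 */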

static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
{
        int ret;

        if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
                net_err_ratelimited("invalid DPT16 length <%u>\n",
                                    le16_to_cpu(ndp16->wLength));
                return -EINVAL;
        }

        ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16))
               / sizeof(struct usb_cdc_ncm_dpe16));
        ret--; /* Last entry is always a NULL terminator */

        if (sizeof(struct usb_cdc_ncm_ndp16) +
            ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) {
                net_err_ratelimited("Invalid nframes = %d\n", ret);
                return -EINVAL;
        }

        return ret;
}
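/* Worked example: a minimal NDP16 with wLength == USB_CDC_NCM_NDP16_LENGTH_MIN
 * (16 bytes) holds (16 - 8) / 4 = 2 DPE16 entries; subtracting the mandatory
 * null terminator leaves a single datagram.
 */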

static void mhi_mbim_rx(struct mhi_mbim_context *mbim, struct sk_buff *skb)
{
        int ndpoffset;

        /* Check NTB header and retrieve first NDP offset */
        ndpoffset = mbim_rx_verify_nth16(mbim, skb);
        if (ndpoffset < 0) {
                net_err_ratelimited("mbim: Incorrect NTB header\n");
                goto error;
        }

        /* Process each NDP */
        while (1) {
                struct usb_cdc_ncm_ndp16 ndp16;
                struct usb_cdc_ncm_dpe16 dpe16;
                struct mhi_mbim_link *link;
                int nframes, n, dpeoffset;
                unsigned int session;

                if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) {
                        net_err_ratelimited("mbim: Incorrect NDP offset (%u)\n",
                                            ndpoffset);
                        goto error;
                }

                /* Check NDP header and retrieve number of datagrams */
                nframes = mbim_rx_verify_ndp16(skb, &ndp16);
                if (nframes < 0) {
                        net_err_ratelimited("mbim: Incorrect NDP16\n");
                        goto error;
                }

                /* Only IP data type supported, no DSS in MHI context */
                if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
                                != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) {
                        net_err_ratelimited("mbim: Unsupported NDP type\n");
                        goto next_ndp;
                }

                session = (le32_to_cpu(ndp16.dwSignature) & ~MBIM_NDP16_SIGN_MASK) >> 24;

                rcu_read_lock();

                link = mhi_mbim_get_link_rcu(mbim, session);
                if (!link) {
                        net_err_ratelimited("mbim: bad packet session (%u)\n", session);
                        goto unlock;
                }

                /* de-aggregate and deliver IP packets */
                dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16);
                for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) {
                        u16 dgram_offset, dgram_len;
                        struct sk_buff *skbn;

                        if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16)))
                                break;

                        dgram_offset = le16_to_cpu(dpe16.wDatagramIndex);
                        dgram_len = le16_to_cpu(dpe16.wDatagramLength);

                        if (!dgram_offset || !dgram_len)
                                break; /* null terminator */

                        skbn = netdev_alloc_skb(link->ndev, dgram_len);
                        if (!skbn)
                                continue;

                        skb_put(skbn, dgram_len);
                        skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len);

                        switch (skbn->data[0] & 0xf0) {
                        case 0x40:
                                skbn->protocol = htons(ETH_P_IP);
                                break;
                        case 0x60:
                                skbn->protocol = htons(ETH_P_IPV6);
                                break;
                        default:
                                net_err_ratelimited("%s: unknown protocol\n",
                                                    link->ndev->name);
                                dev_kfree_skb_any(skbn);
                                u64_stats_update_begin(&link->rx_syncp);
                                u64_stats_inc(&link->rx_errors);
                                u64_stats_update_end(&link->rx_syncp);
                                continue;
                        }

                        u64_stats_update_begin(&link->rx_syncp);
                        u64_stats_inc(&link->rx_packets);
                        u64_stats_add(&link->rx_bytes, skbn->len);
                        u64_stats_update_end(&link->rx_syncp);

                        netif_rx(skbn);
                }
unlock:
                rcu_read_unlock();
next_ndp:
                /* Other NDP to process? */
                ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex);
                if (!ndpoffset)
                        break;
        }

        /* free skb */
        dev_consume_skb_any(skb);
        return;
error:
        dev_kfree_skb_any(skb);
}

static struct sk_buff *mhi_net_skb_agg(struct mhi_mbim_context *mbim,
                                       struct sk_buff *skb)
{
        struct sk_buff *head = mbim->skbagg_head;
        struct sk_buff *tail = mbim->skbagg_tail;

        /* This is non-paged skb chaining using frag_list */
        if (!head) {
                mbim->skbagg_head = skb;
                return skb;
        }

        if (!skb_shinfo(head)->frag_list)
                skb_shinfo(head)->frag_list = skb;
        else
                tail->next = skb;

        head->len += skb->len;
        head->data_len += skb->len;
        head->truesize += skb->truesize;

        mbim->skbagg_tail = skb;

        return mbim->skbagg_head;
}
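/* Note: the first extra fragment hangs off skb_shinfo(head)->frag_list and
 * later fragments are linked through ->next of the previous tail, so the
 * head skb describes the whole aggregate (len/data_len/truesize updated
 * above) without copying any payload.
 */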

static void mhi_net_rx_refill_work(struct work_struct *work)
{
        struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context,
                                                     rx_refill.work);
        struct mhi_device *mdev = mbim->mdev;
        int err;

        while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
                struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL);

                if (unlikely(!skb))
                        break;

                err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
                                    mbim->mru, MHI_EOT);
                if (unlikely(err)) {
                        kfree_skb(skb);
                        break;
                }

                /* Do not hog the CPU if rx buffers are consumed faster than
                 * queued (unlikely).
                 */
                cond_resched();
        }

        /* If we're still starved of rx buffers, reschedule later */
        if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mbim->rx_queue_sz)
                schedule_delayed_work(&mbim->rx_refill, HZ / 2);
}

static void mhi_mbim_dl_callback(struct mhi_device *mhi_dev,
                                 struct mhi_result *mhi_res)
{
        struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
        struct sk_buff *skb = mhi_res->buf_addr;
        int free_desc_count;

        free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

        if (unlikely(mhi_res->transaction_status)) {
                switch (mhi_res->transaction_status) {
                case -EOVERFLOW:
                        /* Packet has been split over multiple transfers */
                        skb_put(skb, mhi_res->bytes_xferd);
                        mhi_net_skb_agg(mbim, skb);
                        break;
                case -ENOTCONN:
                        /* MHI layer stopping/resetting the DL channel */
                        dev_kfree_skb_any(skb);
                        return;
                default:
                        /* Unknown error, simply drop */
                        dev_kfree_skb_any(skb);
                }
        } else {
                skb_put(skb, mhi_res->bytes_xferd);

                if (mbim->skbagg_head) {
                        /* Aggregate the final fragment */
                        skb = mhi_net_skb_agg(mbim, skb);
                        mbim->skbagg_head = NULL;
                }

                mhi_mbim_rx(mbim, skb);
        }

        /* Refill if RX buffers queue becomes low */
        if (free_desc_count >= mbim->rx_queue_sz / 2)
                schedule_delayed_work(&mbim->rx_refill, 0);
}
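/* For example, with rx_queue_sz == 128 descriptors, the refill work above is
 * kicked as soon as at least 64 of them are free.
 */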

static void mhi_mbim_ndo_get_stats64(struct net_device *ndev,
                                     struct rtnl_link_stats64 *stats)
{
        struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&link->rx_syncp);
                stats->rx_packets = u64_stats_read(&link->rx_packets);
                stats->rx_bytes = u64_stats_read(&link->rx_bytes);
                stats->rx_errors = u64_stats_read(&link->rx_errors);
        } while (u64_stats_fetch_retry(&link->rx_syncp, start));

        do {
                start = u64_stats_fetch_begin(&link->tx_syncp);
                stats->tx_packets = u64_stats_read(&link->tx_packets);
                stats->tx_bytes = u64_stats_read(&link->tx_bytes);
                stats->tx_errors = u64_stats_read(&link->tx_errors);
                stats->tx_dropped = u64_stats_read(&link->tx_dropped);
        } while (u64_stats_fetch_retry(&link->tx_syncp, start));
}

static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev,
                                 struct mhi_result *mhi_res)
{
        struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
        struct sk_buff *skb = mhi_res->buf_addr;
        struct net_device *ndev = skb->dev;
        struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);

        /* Hardware has consumed the buffer, so free the skb (which is not
         * freed by the MHI stack) and perform accounting.
         */
        dev_consume_skb_any(skb);

        u64_stats_update_begin(&link->tx_syncp);
        if (unlikely(mhi_res->transaction_status)) {
                /* MHI layer stopping/resetting the UL channel */
                if (mhi_res->transaction_status == -ENOTCONN) {
                        u64_stats_update_end(&link->tx_syncp);
                        return;
                }

                u64_stats_inc(&link->tx_errors);
        } else {
                u64_stats_inc(&link->tx_packets);
                u64_stats_add(&link->tx_bytes, mhi_res->bytes_xferd);
        }
        u64_stats_update_end(&link->tx_syncp);

        if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
                netif_wake_queue(ndev);
}

static int mhi_mbim_ndo_open(struct net_device *ndev)
{
        struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);

        /* Feed the MHI rx buffer pool */
        schedule_delayed_work(&link->mbim->rx_refill, 0);

        /* Carrier is established via out-of-band channel (e.g. qmi) */
        netif_carrier_on(ndev);

        netif_start_queue(ndev);

        return 0;
}

static int mhi_mbim_ndo_stop(struct net_device *ndev)
{
        netif_stop_queue(ndev);
        netif_carrier_off(ndev);

        return 0;
}

static const struct net_device_ops mhi_mbim_ndo = {
        .ndo_open = mhi_mbim_ndo_open,
        .ndo_stop = mhi_mbim_ndo_stop,
        .ndo_start_xmit = mhi_mbim_ndo_xmit,
        .ndo_get_stats64 = mhi_mbim_ndo_get_stats64,
};

static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
                            struct netlink_ext_ack *extack)
{
        struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
        struct mhi_mbim_context *mbim = ctxt;

        link->session = if_id;
        link->mbim = mbim;
        link->ndev = ndev;
        u64_stats_init(&link->rx_syncp);
        u64_stats_init(&link->tx_syncp);

        rcu_read_lock();
        if (mhi_mbim_get_link_rcu(mbim, if_id)) {
                rcu_read_unlock();
                return -EEXIST;
        }
        rcu_read_unlock();

        /* Already protected by RTNL lock */
        hlist_add_head_rcu(&link->hlnode, &mbim->link_list[LINK_HASH(if_id)]);

        return register_netdevice(ndev);
}

static void mhi_mbim_dellink(void *ctxt, struct net_device *ndev,
                             struct list_head *head)
{
        struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);

        hlist_del_init_rcu(&link->hlnode);
        synchronize_rcu();

        unregister_netdevice_queue(ndev, head);
}

static void mhi_mbim_setup(struct net_device *ndev)
{
        ndev->header_ops = NULL;  /* No header */
        ndev->type = ARPHRD_RAWIP;
        ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
        ndev->hard_header_len = 0;
        ndev->addr_len = 0;
        ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
        ndev->netdev_ops = &mhi_mbim_ndo;
        ndev->mtu = MHI_MBIM_DEFAULT_MTU;
        ndev->min_mtu = ETH_MIN_MTU;
        ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom;
        ndev->tx_queue_len = 1000;
        ndev->needs_free_netdev = true;
}

static const struct wwan_ops mhi_mbim_wwan_ops = {
        .priv_size = sizeof(struct mhi_mbim_link),
        .setup = mhi_mbim_setup,
        .newlink = mhi_mbim_newlink,
        .dellink = mhi_mbim_dellink,
};
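/* Usage sketch (illustrative only, iproute2 syntax may vary by version):
 * additional MBIM sessions are created from userspace through the WWAN
 * rtnetlink ops registered above, e.g.:
 *
 *   ip link add dev mbim1 parentdev wwan0 type wwan linkid 1
 */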

static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
        struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
        struct mhi_mbim_context *mbim;
        int err, link_id;

        mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
        if (!mbim)
                return -ENOMEM;

        spin_lock_init(&mbim->tx_lock);
        dev_set_drvdata(&mhi_dev->dev, mbim);
        mbim->mdev = mhi_dev;
        mbim->mru = mhi_dev->mhi_cntrl->mru ? mhi_dev->mhi_cntrl->mru : MHI_DEFAULT_MRU;

        INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work);

        /* Start MHI channels */
        err = mhi_prepare_for_transfer(mhi_dev);
        if (err)
                return err;

        /* Number of transfer descriptors determines size of the queue */
        mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

        /* Get the corresponding mux_id from mhi */
        link_id = mhi_mbim_get_link_mux_id(cntrl);

        /* Register wwan link ops with MHI controller representing WWAN instance */
        return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, link_id);
}

static void mhi_mbim_remove(struct mhi_device *mhi_dev)
{
        struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
        struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;

        mhi_unprepare_from_transfer(mhi_dev);
        cancel_delayed_work_sync(&mbim->rx_refill);
        wwan_unregister_ops(&cntrl->mhi_dev->dev);
        kfree_skb(mbim->skbagg_head);
        dev_set_drvdata(&mhi_dev->dev, NULL);
}

static const struct mhi_device_id mhi_mbim_id_table[] = {
        /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
        { .chan = "IP_HW0_MBIM", .driver_data = 0 },
        {}
};
MODULE_DEVICE_TABLE(mhi, mhi_mbim_id_table);

static struct mhi_driver mhi_mbim_driver = {
        .probe = mhi_mbim_probe,
        .remove = mhi_mbim_remove,
        .dl_xfer_cb = mhi_mbim_dl_callback,
        .ul_xfer_cb = mhi_mbim_ul_callback,
        .id_table = mhi_mbim_id_table,
        .driver = {
                .name = "mhi_wwan_mbim",
        },
};

module_mhi_driver(mhi_mbim_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network/MBIM over MHI");
MODULE_LICENSE("GPL v2");