// SPDX-License-Identifier: GPL-2.0
/*
 * mctp-usb.c - MCTP-over-USB (DMTF DSP0283) transport binding driver.
 *
 * DSP0283 is available at:
 * https://www.dmtf.org/sites/default/files/standards/documents/DSP0283_1.0.1.pdf
 *
 * Copyright (C) 2024-2025 Code Construct Pty Ltd
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
#include <linux/usb/mctp-usb.h>

#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/pkt_sched.h>

#include <uapi/linux/if_arp.h>

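/* Driver state for a single MCTP-over-USB interface: one bulk-in and one
 * bulk-out endpoint, with a single URB in flight in each direction.
 */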
struct mctp_usb {
	struct usb_device *usbdev;
	struct usb_interface *intf;
	bool stopped;

	struct net_device *netdev;

	u8 ep_in;
	u8 ep_out;

	struct urb *tx_urb;
	struct urb *rx_urb;

	struct delayed_work rx_retry_work;
};

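/* Bulk-out URB completion: on success, update TX stats, wake the queue and
 * consume the skb; on any error, count a TX drop and free the skb.
 */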
static void mctp_usb_out_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct net_device *netdev = skb->dev;
	int status;

	status = urb->status;

	switch (status) {
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -EPROTO:
		dev_dstats_tx_dropped(netdev);
		break;
	case 0:
		dev_dstats_tx_add(netdev, skb->len);
		netif_wake_queue(netdev);
		consume_skb(skb);
		return;
	default:
		netdev_dbg(netdev, "unexpected tx urb status: %d\n", status);
		dev_dstats_tx_dropped(netdev);
	}

	kfree_skb(skb);
}

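/* Prepend the DSP0283 MCTP-over-USB header and submit the packet on the
 * bulk-out endpoint. Only one TX URB is used, so the queue is stopped on
 * every submission and re-woken from the completion handler (or immediately,
 * if submission fails).
 */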
static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct mctp_usb *mctp_usb = netdev_priv(dev);
	struct mctp_usb_hdr *hdr;
	unsigned int plen;
	struct urb *urb;
	int rc;

	plen = skb->len;

	if (plen + sizeof(*hdr) > MCTP_USB_XFER_SIZE)
		goto err_drop;

	rc = skb_cow_head(skb, sizeof(*hdr));
	if (rc)
		goto err_drop;

	hdr = skb_push(skb, sizeof(*hdr));
	if (!hdr)
		goto err_drop;

	hdr->id = cpu_to_be16(MCTP_USB_DMTF_ID);
	hdr->rsvd = 0;
	hdr->len = plen + sizeof(*hdr);

	urb = mctp_usb->tx_urb;

	usb_fill_bulk_urb(urb, mctp_usb->usbdev,
			  usb_sndbulkpipe(mctp_usb->usbdev, mctp_usb->ep_out),
			  skb->data, skb->len,
			  mctp_usb_out_complete, skb);

	/* Stop the TX queue before submitting, so we don't race with the
	 * URB completion re-waking it.
	 */
	netif_stop_queue(dev);
	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (rc) {
		netif_wake_queue(dev);
		goto err_drop;
	}

	return NETDEV_TX_OK;

err_drop:
	dev_dstats_tx_dropped(dev);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void mctp_usb_in_complete(struct urb *urb);

/* If we fail to queue an RX URB in atomic context (due to either skb
 * allocation or URB submission failure), schedule a retry from non-atomic
 * context after a delay, specified in jiffies.
 */
static const unsigned long RX_RETRY_DELAY = HZ / 4;

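/* Allocate a full-transfer-sized receive skb and submit it on the bulk-in
 * endpoint. On skb allocation failure, or -ENOMEM from URB submission,
 * schedule a delayed retry.
 */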
static int mctp_usb_rx_queue(struct mctp_usb *mctp_usb, gfp_t gfp)
{
	struct sk_buff *skb;
	int rc;

	skb = __netdev_alloc_skb(mctp_usb->netdev, MCTP_USB_XFER_SIZE, gfp);
	if (!skb) {
		rc = -ENOMEM;
		goto err_retry;
	}

	usb_fill_bulk_urb(mctp_usb->rx_urb, mctp_usb->usbdev,
			  usb_rcvbulkpipe(mctp_usb->usbdev, mctp_usb->ep_in),
			  skb->data, MCTP_USB_XFER_SIZE,
			  mctp_usb_in_complete, skb);

	rc = usb_submit_urb(mctp_usb->rx_urb, gfp);
	if (rc) {
		netdev_dbg(mctp_usb->netdev, "rx urb submit failure: %d\n", rc);
		kfree_skb(skb);
		if (rc == -ENOMEM)
			goto err_retry;
	}

	return rc;

err_retry:
	schedule_delayed_work(&mctp_usb->rx_retry_work, RX_RETRY_DELAY);
	return rc;
}

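/* Bulk-in URB completion: a single USB transfer may carry several MCTP
 * packets, each prefixed with its own MCTP-over-USB header. Walk the
 * transfer, validate each header and pass the packets up to the stack.
 */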
static void mctp_usb_in_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct net_device *netdev = skb->dev;
	struct mctp_usb *mctp_usb = netdev_priv(netdev);
	struct mctp_skb_cb *cb;
	unsigned int len;
	int status;

	status = urb->status;

	switch (status) {
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -EPROTO:
		kfree_skb(skb);
		return;
	case 0:
		break;
	default:
		netdev_dbg(netdev, "unexpected rx urb status: %d\n", status);
		kfree_skb(skb);
		return;
	}

	len = urb->actual_length;
	__skb_put(skb, len);

	while (skb) {
		struct sk_buff *skb2 = NULL;
		struct mctp_usb_hdr *hdr;
		u8 pkt_len; /* length of MCTP packet, no USB header */

		skb_reset_mac_header(skb);
		hdr = skb_pull_data(skb, sizeof(*hdr));
		if (!hdr)
			break;

		if (be16_to_cpu(hdr->id) != MCTP_USB_DMTF_ID) {
			netdev_dbg(netdev, "rx: invalid id %04x\n",
				   be16_to_cpu(hdr->id));
			break;
		}

		if (hdr->len <
		    sizeof(struct mctp_hdr) + sizeof(struct mctp_usb_hdr)) {
			netdev_dbg(netdev, "rx: short packet (hdr) %d\n",
				   hdr->len);
			break;
		}

		/* we know we have at least sizeof(struct mctp_usb_hdr) here */
		pkt_len = hdr->len - sizeof(struct mctp_usb_hdr);
		if (pkt_len > skb->len) {
			netdev_dbg(netdev,
				   "rx: short packet (xfer) %d, actual %d\n",
				   hdr->len, skb->len);
			break;
		}

		if (pkt_len < skb->len) {
			/* more packets may follow - clone to a new
			 * skb to use on the next iteration
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				if (!skb_pull(skb2, pkt_len)) {
					kfree_skb(skb2);
					skb2 = NULL;
				}
			}
			skb_trim(skb, pkt_len);
		}

		dev_dstats_rx_add(netdev, skb->len);

		skb->protocol = htons(ETH_P_MCTP);
		skb_reset_network_header(skb);
		cb = __mctp_cb(skb);
		cb->halen = 0;
		netif_rx(skb);

		skb = skb2;
	}

	if (skb)
		kfree_skb(skb);

	mctp_usb_rx_queue(mctp_usb, GFP_ATOMIC);
}

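/* Delayed-work handler: retry RX URB submission from process context, unless
 * the device has been stopped in the meantime.
 */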
static void mctp_usb_rx_retry_work(struct work_struct *work)
{
	struct mctp_usb *mctp_usb = container_of(work, struct mctp_usb,
						 rx_retry_work.work);

	if (READ_ONCE(mctp_usb->stopped))
		return;

	mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
}

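/* ndo_open: clear the stopped flag, start the TX queue and submit the first
 * RX URB.
 */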
static int mctp_usb_open(struct net_device *dev)
{
	struct mctp_usb *mctp_usb = netdev_priv(dev);

	WRITE_ONCE(mctp_usb->stopped, false);

	netif_start_queue(dev);

	return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
}

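/* ndo_stop: stop the TX queue, mark the device as stopped so the retry
 * worker won't resubmit, then kill in-flight URBs and cancel the retry work.
 */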
static int mctp_usb_stop(struct net_device *dev)
{
	struct mctp_usb *mctp_usb = netdev_priv(dev);

	netif_stop_queue(dev);

	/* prevent RX submission retry */
	WRITE_ONCE(mctp_usb->stopped, true);

	usb_kill_urb(mctp_usb->rx_urb);
	usb_kill_urb(mctp_usb->tx_urb);

	cancel_delayed_work_sync(&mctp_usb->rx_retry_work);

	return 0;
}

static const struct net_device_ops mctp_usb_netdev_ops = {
	.ndo_start_xmit = mctp_usb_start_xmit,
	.ndo_open = mctp_usb_open,
	.ndo_stop = mctp_usb_stop,
};

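/* Netdev setup: MCTP link type and MTU limits, the MCTP-over-USB header as
 * link-layer header, and per-CPU dstats for the TX/RX counters used above.
 */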
static void mctp_usb_netdev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_MCTP;

	dev->mtu = MCTP_USB_MTU_MIN;
	dev->min_mtu = MCTP_USB_MTU_MIN;
	dev->max_mtu = MCTP_USB_MTU_MAX;

	dev->hard_header_len = sizeof(struct mctp_usb_hdr);
	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	dev->flags = IFF_NOARP;
	dev->netdev_ops = &mctp_usb_netdev_ops;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}

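/* Probe: find the bulk in/out endpoint pair, allocate the netdev and the
 * TX/RX URBs, then register with the MCTP core as a USB physical binding.
 */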
static int mctp_usb_probe(struct usb_interface *intf,
			  const struct usb_device_id *id)
{
	struct usb_endpoint_descriptor *ep_in, *ep_out;
	struct usb_host_interface *iface_desc;
	struct net_device *netdev;
	struct mctp_usb *dev;
	int rc;

	/* only one alternate */
	iface_desc = intf->cur_altsetting;

	rc = usb_find_common_endpoints(iface_desc, &ep_in, &ep_out, NULL, NULL);
	if (rc) {
		dev_err(&intf->dev, "invalid endpoints on device?\n");
		return rc;
	}

	netdev = alloc_netdev(sizeof(*dev), "mctpusb%d", NET_NAME_ENUM,
			      mctp_usb_netdev_setup);
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &intf->dev);
	dev = netdev_priv(netdev);
	dev->netdev = netdev;
	dev->usbdev = usb_get_dev(interface_to_usbdev(intf));
	dev->intf = intf;
	usb_set_intfdata(intf, dev);

	dev->ep_in = ep_in->bEndpointAddress;
	dev->ep_out = ep_out->bEndpointAddress;

	dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->tx_urb || !dev->rx_urb) {
		rc = -ENOMEM;
		goto err_free_urbs;
	}

	INIT_DELAYED_WORK(&dev->rx_retry_work, mctp_usb_rx_retry_work);

	rc = mctp_register_netdev(netdev, NULL, MCTP_PHYS_BINDING_USB);
	if (rc)
		goto err_free_urbs;

	return 0;

err_free_urbs:
	usb_free_urb(dev->tx_urb);
	usb_free_urb(dev->rx_urb);
	/* drop the reference taken by usb_get_dev() above */
	usb_put_dev(dev->usbdev);
	free_netdev(netdev);
	return rc;
}

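/* Disconnect: unregister the netdev, then release the URBs, the usb_device
 * reference and the netdev itself.
 */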
static void mctp_usb_disconnect(struct usb_interface *intf)
{
	struct mctp_usb *dev = usb_get_intfdata(intf);

	mctp_unregister_netdev(dev->netdev);
	usb_free_urb(dev->tx_urb);
	usb_free_urb(dev->rx_urb);
	usb_put_dev(dev->usbdev);
	free_netdev(dev->netdev);
}

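/* Match on the MCTP interface class code; the subclass and protocol values
 * are those defined for MCTP-over-USB endpoints in DSP0283.
 */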
static const struct usb_device_id mctp_usb_devices[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_MCTP, 0x0, 0x1) },
	{ 0 },
};

MODULE_DEVICE_TABLE(usb, mctp_usb_devices);

static struct usb_driver mctp_usb_driver = {
	.name = "mctp-usb",
	.id_table = mctp_usb_devices,
	.probe = mctp_usb_probe,
	.disconnect = mctp_usb_disconnect,
};

module_usb_driver(mctp_usb_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
MODULE_DESCRIPTION("MCTP USB transport");