// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/*
 * PCIe NTB Network Linux driver
 */
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>
#include <linux/slab.h>

#define NTB_NETDEV_VER	"0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stopping upper-layer tx */
static unsigned int tx_stop = 5;

#define NTB_NETDEV_MAX_QUEUES		64
#define NTB_NETDEV_DEFAULT_QUEUES	1

struct ntb_netdev;

struct ntb_netdev_queue {
	struct ntb_netdev *ntdev;
	struct ntb_transport_qp *qp;
	struct timer_list tx_timer;
	u16 qid;
};

struct ntb_netdev {
	struct pci_dev *pdev;
	struct device *client_dev;
	struct net_device *ndev;
	unsigned int num_queues;
	struct ntb_netdev_queue *queues;
};

#define	NTB_TX_TIMEOUT_MS	1000
#define	NTB_RXQ_SIZE		100

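/*
 * The net_device has a single carrier but may be backed by several
 * transport queue pairs. Carrier is on while at least one QP link is up
 * and goes off only when every QP is down.
 */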
static void ntb_netdev_update_carrier(struct ntb_netdev *dev)
{
	struct net_device *ndev;
	bool any_up = false;
	unsigned int i;

	ndev = dev->ndev;

	for (i = 0; i < dev->num_queues; i++) {
		if (ntb_transport_link_query(dev->queues[i].qp)) {
			any_up = true;
			break;
		}
	}

	if (any_up)
		netif_carrier_on(ndev);
	else
		netif_carrier_off(ndev);
}

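/* Pop and free every skb still posted on this queue pair's rx ring. */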
static void ntb_netdev_queue_rx_drain(struct ntb_netdev_queue *queue)
{
	struct sk_buff *skb;
	int len;

	while ((skb = ntb_transport_rx_remove(queue->qp, &len)))
		dev_kfree_skb(skb);
}

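/*
 * Post NTB_RXQ_SIZE rx buffers, each sized for an MTU frame plus the
 * Ethernet header, onto the queue pair's rx ring.
 */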
static int ntb_netdev_queue_rx_fill(struct net_device *ndev,
				    struct ntb_netdev_queue *queue)
{
	struct sk_buff *skb;
	int rc, i;

	for (i = 0; i < NTB_RXQ_SIZE; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb)
			return -ENOMEM;

		rc = ntb_transport_rx_enqueue(queue->qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			dev_kfree_skb(skb);
			return rc;
		}
	}

	return 0;
}

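/*
 * Transport link event callback for one QP. Wake or stop the matching
 * subqueue, then recompute the device-wide carrier, since a single QP
 * changing state can be enough to toggle it.
 */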
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
	struct ntb_netdev_queue *q = data;
	struct ntb_netdev *dev = q->ntdev;
	struct net_device *ndev;

	ndev = dev->ndev;

	netdev_dbg(ndev, "Event %x, Link %x, qp %u\n", link_is_up,
		   ntb_transport_link_query(q->qp), q->qid);

	if (netif_running(ndev)) {
		if (link_is_up)
			netif_wake_subqueue(ndev, q->qid);
		else
			netif_stop_subqueue(ndev, q->qid);
	}

	ntb_netdev_update_carrier(dev);
}

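/*
 * Rx completion callback. Pass the filled skb up the stack, update stats,
 * and post a replacement buffer so the rx ring stays full. A negative
 * length marks a receive error: the original skb is recycled instead.
 */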
static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct ntb_netdev_queue *q = qp_data;
	struct ntb_netdev *dev = q->ntdev;
	struct net_device *ndev;
	struct sk_buff *skb;
	int rc;

	ndev = dev->ndev;
	skb = data;
	if (!skb)
		return;

	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);

	if (len < 0) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_length_errors++;
		goto enqueue_again;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;
	skb_record_rx_queue(skb, q->qid);

	if (netif_rx(skb) == NET_RX_DROP) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	} else {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}

	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
	if (!skb) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_frame_errors++;
		return;
	}

enqueue_again:
	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
	if (rc) {
		dev_kfree_skb_any(skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_fifo_errors++;
	}
}

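/*
 * Stop the subqueue, then re-check the descriptor count: a completion may
 * have raced with us and freed entries, in which case the queue can be
 * restarted immediately. The barrier pairs with the smp_mb() in the tx
 * completion path. If descriptors are still short, arm the reaper timer.
 */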
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
				      struct ntb_netdev_queue *q, int size)
{
	netif_stop_subqueue(netdev, q->qid);

	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
	 * since the queue was last started.
	 */
	smp_mb();

	if (likely(ntb_transport_tx_free_entry(q->qp) < size)) {
		mod_timer(&q->tx_timer, jiffies + usecs_to_jiffies(tx_time));
		return -EBUSY;
	}

	/* The subqueue must be kept stopped if the link is down */
	if (ntb_transport_link_query(q->qp))
		netif_start_subqueue(netdev, q->qid);

	return 0;
}

static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
				    struct ntb_netdev_queue *q, int size)
{
	if (__netif_subqueue_stopped(ndev, q->qid) ||
	    (ntb_transport_tx_free_entry(q->qp) >= size))
		return 0;

	return __ntb_netdev_maybe_stop_tx(ndev, q, size);
}

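/*
 * Tx completion callback. Account the frame, free the skb, and wake the
 * subqueue once at least tx_start descriptors have been reclaimed and the
 * QP link is still up.
 */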
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct ntb_netdev_queue *q = qp_data;
	struct ntb_netdev *dev = q->ntdev;
	struct net_device *ndev;
	struct sk_buff *skb;

	ndev = dev->ndev;
	skb = data;
	if (!skb || !ndev)
		return;

	if (len > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
		ndev->stats.tx_aborted_errors++;
	}

	dev_kfree_skb_any(skb);

	if (ntb_transport_tx_free_entry(qp) >= tx_start) {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (__netif_subqueue_stopped(ndev, q->qid) &&
		    ntb_transport_link_query(q->qp))
			netif_wake_subqueue(ndev, q->qid);
	}
}

static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};

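/*
 * ndo_start_xmit: map the skb to its subqueue's QP, throttle when tx
 * descriptors run short (tx_stop), and hand the frame to the transport.
 * The second maybe_stop_tx call stops the queue early for the next frame.
 */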
static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	u16 qid = skb_get_queue_mapping(skb);
	struct ntb_netdev_queue *q;
	int rc;

	q = &dev->queues[qid];

	ntb_netdev_maybe_stop_tx(ndev, q, tx_stop);

	rc = ntb_transport_tx_enqueue(q->qp, skb, skb->data, skb->len);
	if (rc)
		goto err;

	/* check for next submit */
	ntb_netdev_maybe_stop_tx(ndev, q, tx_stop);

	return NETDEV_TX_OK;

err:
	ndev->stats.tx_dropped++;
	ndev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}

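/*
 * Tx resource reaper, armed by __ntb_netdev_maybe_stop_tx(). Re-arms
 * itself every tx_time usecs until at least tx_stop descriptors are free,
 * then wakes the stopped subqueue if the QP link is up.
 */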
static void ntb_netdev_tx_timer(struct timer_list *t)
{
	struct ntb_netdev_queue *q = timer_container_of(q, t, tx_timer);
	struct ntb_netdev *dev = q->ntdev;
	struct net_device *ndev;

	ndev = dev->ndev;

	if (ntb_transport_tx_free_entry(q->qp) < tx_stop) {
		mod_timer(&q->tx_timer, jiffies + usecs_to_jiffies(tx_time));
	} else {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();

		/* The subqueue must be kept stopped if the link is down */
		if (__netif_subqueue_stopped(ndev, q->qid) &&
		    ntb_transport_link_query(q->qp))
			netif_wake_subqueue(ndev, q->qid);
	}
}

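/*
 * ndo_open: prime every rx ring and set up the tx reaper timers, then
 * raise the transport link on each QP. The carrier and subqueues stay
 * down until the QPs report link up via the event handler.
 */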
static int ntb_netdev_open(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct ntb_netdev_queue *queue;
	unsigned int q;
	int rc = 0;

	/* Add some empty rx bufs for each queue */
	for (q = 0; q < dev->num_queues; q++) {
		queue = &dev->queues[q];

		rc = ntb_netdev_queue_rx_fill(ndev, queue);
		if (rc)
			goto err;

		timer_setup(&queue->tx_timer, ntb_netdev_tx_timer, 0);
	}

	netif_carrier_off(ndev);
	netif_tx_stop_all_queues(ndev);

	for (q = 0; q < dev->num_queues; q++)
		ntb_transport_link_up(dev->queues[q].qp);

	return 0;

err:
	for (q = 0; q < dev->num_queues; q++) {
		queue = &dev->queues[q];
		ntb_netdev_queue_rx_drain(queue);
	}
	return rc;
}

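/*
 * ndo_stop: mirror of open. Take each QP link down, drain the posted rx
 * buffers, and ensure no tx reaper timer is left running.
 */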
static int ntb_netdev_close(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct ntb_netdev_queue *queue;
	unsigned int q;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);

	for (q = 0; q < dev->num_queues; q++) {
		queue = &dev->queues[q];

		ntb_transport_link_down(queue->qp);
		ntb_netdev_queue_rx_drain(queue);
		timer_delete_sync(&queue->tx_timer);
	}

	return 0;
}

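/*
 * A larger MTU needs larger posted rx buffers, so take the links down
 * and, when the MTU grows, replace every posted rx buffer with one sized
 * for the new MTU before bringing the links back up.
 */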
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct ntb_netdev_queue *queue;
	struct sk_buff *skb;
	unsigned int q, i;
	int len, rc = 0;

	if (new_mtu > ntb_transport_max_size(dev->queues[0].qp) - ETH_HLEN)
		return -EINVAL;

	if (!netif_running(ndev)) {
		WRITE_ONCE(ndev->mtu, new_mtu);
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	for (q = 0; q < dev->num_queues; q++)
		ntb_transport_link_down(dev->queues[q].qp);

	if (ndev->mtu < new_mtu) {
		for (q = 0; q < dev->num_queues; q++) {
			queue = &dev->queues[q];

			for (i = 0;
			     (skb = ntb_transport_rx_remove(queue->qp, &len));
			     i++)
				dev_kfree_skb(skb);

			for (; i; i--) {
				skb = netdev_alloc_skb(ndev,
						       new_mtu + ETH_HLEN);
				if (!skb) {
					rc = -ENOMEM;
					goto err;
				}

				rc = ntb_transport_rx_enqueue(queue->qp, skb,
							      skb->data,
							      new_mtu +
							      ETH_HLEN);
				if (rc) {
					dev_kfree_skb(skb);
					goto err;
				}
			}
		}
	}

	WRITE_ONCE(ndev->mtu, new_mtu);

	for (q = 0; q < dev->num_queues; q++)
		ntb_transport_link_up(dev->queues[q].qp);

	return 0;

err:
	for (q = 0; q < dev->num_queues; q++) {
		struct ntb_netdev_queue *queue = &dev->queues[q];

		ntb_transport_link_down(queue->qp);

		ntb_netdev_queue_rx_drain(queue);
	}

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}

static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};

static void ntb_get_drvinfo(struct net_device *ndev,
			    struct ethtool_drvinfo *info)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strscpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
	strscpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}

static int ntb_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);

	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static void ntb_get_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	channels->combined_count = dev->num_queues;
	channels->max_combined = ndev->num_tx_queues;
}

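/*
 * Grow the channel count. Create a QP for each added subqueue and, when
 * the device is running, prime its rx ring; new subqueues stay stopped
 * until their QP reports link up. On failure everything created here is
 * torn down and the old count remains in effect.
 */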
static int ntb_inc_channels(struct net_device *ndev,
			    unsigned int old, unsigned int new)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	bool running = netif_running(ndev);
	struct ntb_netdev_queue *queue;
	unsigned int q, created;
	int rc;

	created = old;
	for (q = old; q < new; q++) {
		queue = &dev->queues[q];

		queue->ntdev = dev;
		queue->qid = q;
		queue->qp = ntb_transport_create_queue(queue, dev->client_dev,
						       &ntb_netdev_handlers);
		if (!queue->qp) {
			rc = -ENOSPC;
			goto err_new;
		}
		created++;

		if (!running)
			continue;

		timer_setup(&queue->tx_timer, ntb_netdev_tx_timer, 0);

		rc = ntb_netdev_queue_rx_fill(ndev, queue);
		if (rc)
			goto err_new;

		/*
		 * Carrier may already be on due to other QPs. Keep the new
		 * subqueue stopped until we get a Link Up event for this QP.
		 */
		netif_stop_subqueue(ndev, q);
	}

	rc = netif_set_real_num_queues(ndev, new, new);
	if (rc)
		goto err_new;

	dev->num_queues = new;

	if (running)
		for (q = old; q < new; q++)
			ntb_transport_link_up(dev->queues[q].qp);

	return 0;

err_new:
	if (running) {
		unsigned int rollback = created;

		while (rollback-- > old) {
			queue = &dev->queues[rollback];
			ntb_transport_link_down(queue->qp);
			ntb_netdev_queue_rx_drain(queue);
			timer_delete_sync(&queue->tx_timer);
		}
	}
	while (created-- > old) {
		queue = &dev->queues[created];
		ntb_transport_free_queue(queue->qp);
		queue->qp = NULL;
	}
	return rc;
}

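/*
 * Shrink the channel count. Stop the departing subqueues and publish the
 * reduced count before taking their links down and freeing the QPs, so
 * that no new work references the stale queue pointers.
 */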
static int ntb_dec_channels(struct net_device *ndev,
			    unsigned int old, unsigned int new)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	bool running = netif_running(ndev);
	struct ntb_netdev_queue *queue;
	unsigned int q;
	int rc;

	if (running)
		for (q = new; q < old; q++)
			netif_stop_subqueue(ndev, q);

	rc = netif_set_real_num_queues(ndev, new, new);
	if (rc)
		goto err;

	/* Publish new queue count before invalidating QP pointers */
	dev->num_queues = new;

	for (q = new; q < old; q++) {
		queue = &dev->queues[q];

		if (running) {
			ntb_transport_link_down(queue->qp);
			ntb_netdev_queue_rx_drain(queue);
			timer_delete_sync(&queue->tx_timer);
		}

		ntb_transport_free_queue(queue->qp);
		queue->qp = NULL;
	}

	/*
	 * It might be the case that the removed queues are the only queues that
	 * were up, so see if the global carrier needs to change.
	 */
	ntb_netdev_update_carrier(dev);
	return 0;

err:
	if (running) {
		for (q = new; q < old; q++)
			netif_wake_subqueue(ndev, q);
	}
	return rc;
}

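/*
 * ethtool "set channels" entry point ("ethtool -L"). The ethtool core has
 * already validated the request against the limits ntb_get_channels()
 * reports. For example (assuming the device registered as eth0), four
 * combined channels could be requested with:
 *
 *	ethtool -L eth0 combined 4
 */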
static int ntb_set_channels(struct net_device *ndev,
			    struct ethtool_channels *channels)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	unsigned int new = channels->combined_count;
	unsigned int old = dev->num_queues;

	if (new == old)
		return 0;

	if (new < old)
		return ntb_dec_channels(ndev, old, new);
	else
		return ntb_inc_channels(ndev, old, new);
}

static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = ntb_get_link_ksettings,
	.get_channels = ntb_get_channels,
	.set_channels = ntb_set_channels,
};

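/*
 * Client probe: allocate a multiqueue etherdev sized for the maximum
 * channel count, create the default QPs, and derive the initial MTU from
 * the transport's maximum payload size.
 */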
static int ntb_netdev_probe(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	unsigned int q;
	int rc;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;
	if (!pdev)
		return -ENODEV;

	ndev = alloc_etherdev_mq(sizeof(*dev), NTB_NETDEV_MAX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, client_dev);

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->pdev = pdev;
	dev->client_dev = client_dev;
	dev->num_queues = 0;

	dev->queues = kcalloc(NTB_NETDEV_MAX_QUEUES, sizeof(*dev->queues),
			      GFP_KERNEL);
	if (!dev->queues) {
		rc = -ENOMEM;
		goto err_free_netdev;
	}

	ndev->features = NETIF_F_HIGHDMA;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	ndev->hw_features = ndev->features;
	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

	eth_random_addr(ndev->perm_addr);
	dev_addr_set(ndev, ndev->perm_addr);

	ndev->netdev_ops = &ntb_netdev_ops;
	ndev->ethtool_ops = &ntb_ethtool_ops;

	ndev->min_mtu = 0;
	ndev->max_mtu = ETH_MAX_MTU;

	for (q = 0; q < NTB_NETDEV_DEFAULT_QUEUES; q++) {
		struct ntb_netdev_queue *queue = &dev->queues[q];

		queue->ntdev = dev;
		queue->qid = q;
		queue->qp = ntb_transport_create_queue(queue, client_dev,
						       &ntb_netdev_handlers);
		if (!queue->qp)
			break;

		dev->num_queues++;
	}

	if (!dev->num_queues) {
		rc = -EIO;
		goto err_free_queues;
	}

	rc = netif_set_real_num_queues(ndev, dev->num_queues, dev->num_queues);
	if (rc)
		goto err_free_qps;

	ndev->mtu = ntb_transport_max_size(dev->queues[0].qp) - ETH_HLEN;

	rc = register_netdev(ndev);
	if (rc)
		goto err_free_qps;

	dev_set_drvdata(client_dev, ndev);
	dev_info(&pdev->dev, "%s created with %u queue pairs\n",
		 ndev->name, dev->num_queues);
	return 0;

err_free_qps:
	for (q = 0; q < dev->num_queues; q++)
		ntb_transport_free_queue(dev->queues[q].qp);

err_free_queues:
	kfree(dev->queues);

err_free_netdev:
	free_netdev(ndev);
	return rc;
}

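/* Client remove: unregister the netdev, then free all remaining QPs. */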
static void ntb_netdev_remove(struct device *client_dev)
{
	struct net_device *ndev = dev_get_drvdata(client_dev);
	struct ntb_netdev *dev = netdev_priv(ndev);
	unsigned int q;

	unregister_netdev(ndev);
	for (q = 0; q < dev->num_queues; q++)
		ntb_transport_free_queue(dev->queues[q].qp);

	kfree(dev->queues);
	free_netdev(ndev);
}

static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};

static int __init ntb_netdev_init_module(void)
{
	int rc;

	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = ntb_transport_register_client(&ntb_netdev_client);
	if (rc) {
		ntb_transport_unregister_client_dev(KBUILD_MODNAME);
		return rc;
	}

	return 0;
}
late_initcall(ntb_netdev_init_module);

static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);
746