xref: /linux/drivers/net/ntb_netdev.c (revision 9b29afa1166088ca4e8223857508f2a19d88b58b)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  *   redistributing this file, you may do so under either license.
4  *
5  *   GPL LICENSE SUMMARY
6  *
7  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
8  *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
9  *
10  *   This program is free software; you can redistribute it and/or modify
11  *   it under the terms of version 2 of the GNU General Public License as
12  *   published by the Free Software Foundation.
13  *
14  *   BSD LICENSE
15  *
16  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
17  *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
18  *
19  *   Redistribution and use in source and binary forms, with or without
20  *   modification, are permitted provided that the following conditions
21  *   are met:
22  *
23  *     * Redistributions of source code must retain the above copyright
24  *       notice, this list of conditions and the following disclaimer.
25  *     * Redistributions in binary form must reproduce the above copy
26  *       notice, this list of conditions and the following disclaimer in
27  *       the documentation and/or other materials provided with the
28  *       distribution.
29  *     * Neither the name of Intel Corporation nor the names of its
30  *       contributors may be used to endorse or promote products derived
31  *       from this software without specific prior written permission.
32  *
33  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44  *
45  * PCIe NTB Network Linux driver
46  *
47  * Contact Information:
48  * Jon Mason <jon.mason@intel.com>
49  */
50 #include <linux/etherdevice.h>
51 #include <linux/ethtool.h>
52 #include <linux/module.h>
53 #include <linux/pci.h>
54 #include <linux/ntb.h>
55 #include <linux/ntb_transport.h>
56 #include <linux/slab.h>
57 
#define NTB_NETDEV_VER	"0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stop upper layer tx */
static unsigned int tx_stop = 5;

/* Queue-pair limits: probe starts with the default count and ethtool -L
 * (ntb_set_channels) can grow up to the maximum.
 */
#define NTB_NETDEV_MAX_QUEUES		64
#define NTB_NETDEV_DEFAULT_QUEUES	1
76 
struct ntb_netdev;

/* Per-queue-pair state: back-pointer to the owning device, the transport
 * QP carrying the traffic, the deferred tx-reaper timer, and the tx
 * subqueue index this QP is bound to.
 */
struct ntb_netdev_queue {
	struct ntb_netdev *ntdev;
	struct ntb_transport_qp *qp;
	struct timer_list tx_timer;
	u16 qid;
};

/* Driver-private state embedded in the net_device's priv area */
struct ntb_netdev {
	struct pci_dev *pdev;
	struct device *client_dev;
	struct net_device *ndev;
	unsigned int num_queues;	/* live entries at the front of @queues */
	struct ntb_netdev_queue *queues;
};

#define	NTB_TX_TIMEOUT_MS	1000
#define	NTB_RXQ_SIZE		100
96 
97 static void ntb_netdev_update_carrier(struct ntb_netdev *dev)
98 {
99 	struct net_device *ndev;
100 	bool any_up = false;
101 	unsigned int i;
102 
103 	ndev = dev->ndev;
104 
105 	for (i = 0; i < dev->num_queues; i++) {
106 		if (ntb_transport_link_query(dev->queues[i].qp)) {
107 			any_up = true;
108 			break;
109 		}
110 	}
111 
112 	if (any_up)
113 		netif_carrier_on(ndev);
114 	else
115 		netif_carrier_off(ndev);
116 }
117 
118 static void ntb_netdev_queue_rx_drain(struct ntb_netdev_queue *queue)
119 {
120 	struct sk_buff *skb;
121 	int len;
122 
123 	while ((skb = ntb_transport_rx_remove(queue->qp, &len)))
124 		dev_kfree_skb(skb);
125 }
126 
127 static int ntb_netdev_queue_rx_fill(struct net_device *ndev,
128 				    struct ntb_netdev_queue *queue)
129 {
130 	struct sk_buff *skb;
131 	int rc, i;
132 
133 	for (i = 0; i < NTB_RXQ_SIZE; i++) {
134 		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
135 		if (!skb)
136 			return -ENOMEM;
137 
138 		rc = ntb_transport_rx_enqueue(queue->qp, skb, skb->data,
139 					      ndev->mtu + ETH_HLEN);
140 		if (rc) {
141 			dev_kfree_skb(skb);
142 			return rc;
143 		}
144 	}
145 
146 	return 0;
147 }
148 
149 static void ntb_netdev_event_handler(void *data, int link_is_up)
150 {
151 	struct ntb_netdev_queue *q = data;
152 	struct ntb_netdev *dev = q->ntdev;
153 	struct net_device *ndev;
154 
155 	ndev = dev->ndev;
156 
157 	netdev_dbg(ndev, "Event %x, Link %x, qp %u\n", link_is_up,
158 		   ntb_transport_link_query(q->qp), q->qid);
159 
160 	if (netif_running(ndev)) {
161 		if (link_is_up)
162 			netif_wake_subqueue(ndev, q->qid);
163 		else
164 			netif_stop_subqueue(ndev, q->qid);
165 	}
166 
167 	ntb_netdev_update_carrier(dev);
168 }
169 
/* Transport rx completion callback.
 *
 * @qp_data is the struct ntb_netdev_queue the buffer was posted on,
 * @data the received skb and @len the payload length (negative on a
 * transport receive error). A consumed skb is handed to the stack and
 * replaced with a freshly allocated buffer so the rx ring stays full;
 * on a length error the untouched skb is re-posted instead.
 */
static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct ntb_netdev_queue *q = qp_data;
	struct ntb_netdev *dev = q->ntdev;
	struct net_device *ndev;
	struct sk_buff *skb;
	int rc;

	ndev = dev->ndev;
	skb = data;
	if (!skb)
		return;

	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);

	/* Negative length: recycle the same skb rather than passing it up */
	if (len < 0) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_length_errors++;
		goto enqueue_again;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;
	skb_record_rx_queue(skb, q->qid);

	if (netif_rx(skb) == NET_RX_DROP) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	} else {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}

	/* The skb went up the stack; allocate a replacement rx buffer */
	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
	if (!skb) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_frame_errors++;
		return;
	}

enqueue_again:
	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
	if (rc) {
		dev_kfree_skb_any(skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_fifo_errors++;
	}
}
220 
/* Slow path of tx flow control: stop the subqueue, then re-check the
 * free-entry count under a full barrier so a completion racing with the
 * stop cannot be missed (pairs with the smp_mb() in
 * ntb_netdev_tx_handler()). Returns -EBUSY with the reaper timer armed
 * when the queue must stay stopped, or 0 after restarting it.
 */
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
				      struct ntb_netdev_queue *q, int size)
{
	netif_stop_subqueue(netdev, q->qid);

	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
	 * since the queue was last started.
	 */
	smp_mb();

	if (likely(ntb_transport_tx_free_entry(q->qp) < size)) {
		mod_timer(&q->tx_timer, jiffies + usecs_to_jiffies(tx_time));
		return -EBUSY;
	}

	/* The subqueue must be kept stopped if the link is down */
	if (ntb_transport_link_query(q->qp))
		netif_start_subqueue(netdev, q->qid);

	return 0;
}
242 
243 static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
244 				    struct ntb_netdev_queue *q, int size)
245 {
246 	if (__netif_subqueue_stopped(ndev, q->qid) ||
247 	    (ntb_transport_tx_free_entry(q->qp) >= size))
248 		return 0;
249 
250 	return __ntb_netdev_maybe_stop_tx(ndev, q, size);
251 }
252 
/* Transport tx completion callback: account the finished skb (@len <= 0
 * flags an aborted transmit), free it, and wake the subqueue once at
 * least tx_start descriptors are free again — pairing with the stop in
 * __ntb_netdev_maybe_stop_tx().
 */
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct ntb_netdev_queue *q = qp_data;
	struct ntb_netdev *dev = q->ntdev;
	struct net_device *ndev;
	struct sk_buff *skb;

	ndev = dev->ndev;
	skb = data;
	if (!skb || !ndev)
		return;

	if (len > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
		ndev->stats.tx_aborted_errors++;
	}

	dev_kfree_skb_any(skb);

	if (ntb_transport_tx_free_entry(qp) >= tx_start) {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		/* Only wake if stopped and the QP link is up */
		if (__netif_subqueue_stopped(ndev, q->qid) &&
		    ntb_transport_link_query(q->qp))
			netif_wake_subqueue(ndev, q->qid);
	}
}
286 
/* Callbacks registered with the NTB transport for every queue pair */
static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};
292 
293 static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
294 					 struct net_device *ndev)
295 {
296 	struct ntb_netdev *dev = netdev_priv(ndev);
297 	u16 qid = skb_get_queue_mapping(skb);
298 	struct ntb_netdev_queue *q;
299 	int rc;
300 
301 	q = &dev->queues[qid];
302 
303 	ntb_netdev_maybe_stop_tx(ndev, q, tx_stop);
304 
305 	rc = ntb_transport_tx_enqueue(q->qp, skb, skb->data, skb->len);
306 	if (rc)
307 		goto err;
308 
309 	/* check for next submit */
310 	ntb_netdev_maybe_stop_tx(ndev, q, tx_stop);
311 
312 	return NETDEV_TX_OK;
313 
314 err:
315 	ndev->stats.tx_dropped++;
316 	ndev->stats.tx_errors++;
317 	return NETDEV_TX_BUSY;
318 }
319 
/* Deferred tx reaper armed by __ntb_netdev_maybe_stop_tx(): re-arm
 * itself while free tx descriptors remain below tx_stop, otherwise
 * wake the stopped subqueue (only if the QP link is up).
 */
static void ntb_netdev_tx_timer(struct timer_list *t)
{
	struct ntb_netdev_queue *q = timer_container_of(q, t, tx_timer);
	struct ntb_netdev *dev = q->ntdev;
	struct net_device *ndev;

	ndev = dev->ndev;

	if (ntb_transport_tx_free_entry(q->qp) < tx_stop) {
		mod_timer(&q->tx_timer, jiffies + usecs_to_jiffies(tx_time));
	} else {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();

		/* The subqueue must be kept stopped if the link is down */
		if (__netif_subqueue_stopped(ndev, q->qid) &&
		    ntb_transport_link_query(q->qp))
			netif_wake_subqueue(ndev, q->qid);
	}
}
342 
343 static int ntb_netdev_open(struct net_device *ndev)
344 {
345 	struct ntb_netdev *dev = netdev_priv(ndev);
346 	struct ntb_netdev_queue *queue;
347 	unsigned int q;
348 	int rc = 0;
349 
350 	/* Add some empty rx bufs for each queue */
351 	for (q = 0; q < dev->num_queues; q++) {
352 		queue = &dev->queues[q];
353 
354 		rc = ntb_netdev_queue_rx_fill(ndev, queue);
355 		if (rc)
356 			goto err;
357 
358 		timer_setup(&queue->tx_timer, ntb_netdev_tx_timer, 0);
359 	}
360 
361 	netif_carrier_off(ndev);
362 	netif_tx_stop_all_queues(ndev);
363 
364 	for (q = 0; q < dev->num_queues; q++)
365 		ntb_transport_link_up(dev->queues[q].qp);
366 
367 	return 0;
368 
369 err:
370 	for (q = 0; q < dev->num_queues; q++) {
371 		queue = &dev->queues[q];
372 		ntb_netdev_queue_rx_drain(queue);
373 	}
374 	return rc;
375 }
376 
377 static int ntb_netdev_close(struct net_device *ndev)
378 {
379 	struct ntb_netdev *dev = netdev_priv(ndev);
380 	struct ntb_netdev_queue *queue;
381 	unsigned int q;
382 
383 	netif_tx_stop_all_queues(ndev);
384 	netif_carrier_off(ndev);
385 
386 	for (q = 0; q < dev->num_queues; q++) {
387 		queue = &dev->queues[q];
388 
389 		ntb_transport_link_down(queue->qp);
390 		ntb_netdev_queue_rx_drain(queue);
391 		timer_delete_sync(&queue->tx_timer);
392 	}
393 
394 	return 0;
395 }
396 
/* ndo_change_mtu: validate @new_mtu against the transport's maximum
 * frame size. When the interface is running the transport links are
 * cycled around the change, and a growing MTU forces every posted rx
 * buffer to be reallocated at the new size. If that reallocation
 * fails, links are left down and rx buffers drained — the device is
 * inoperable until reconfigured — and the error is reported.
 */
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct ntb_netdev_queue *queue;
	struct sk_buff *skb;
	unsigned int q, i;
	int len, rc = 0;

	if (new_mtu > ntb_transport_max_size(dev->queues[0].qp) - ETH_HLEN)
		return -EINVAL;

	if (!netif_running(ndev)) {
		WRITE_ONCE(ndev->mtu, new_mtu);
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	for (q = 0; q < dev->num_queues; q++)
		ntb_transport_link_down(dev->queues[q].qp);

	/* Shrinking keeps the existing (larger) buffers; only a grow
	 * requires replacing them.
	 */
	if (ndev->mtu < new_mtu) {
		for (q = 0; q < dev->num_queues; q++) {
			queue = &dev->queues[q];

			/* Count removed buffers so the same number can be
			 * re-posted at the new size.
			 */
			for (i = 0;
			     (skb = ntb_transport_rx_remove(queue->qp, &len));
			     i++)
				dev_kfree_skb(skb);

			for (; i; i--) {
				skb = netdev_alloc_skb(ndev,
						       new_mtu + ETH_HLEN);
				if (!skb) {
					rc = -ENOMEM;
					goto err;
				}

				rc = ntb_transport_rx_enqueue(queue->qp, skb,
							      skb->data,
							      new_mtu +
							      ETH_HLEN);
				if (rc) {
					dev_kfree_skb(skb);
					goto err;
				}
			}
		}
	}

	WRITE_ONCE(ndev->mtu, new_mtu);

	for (q = 0; q < dev->num_queues; q++)
		ntb_transport_link_up(dev->queues[q].qp);

	return 0;

err:
	for (q = 0; q < dev->num_queues; q++) {
		struct ntb_netdev_queue *queue = &dev->queues[q];

		ntb_transport_link_down(queue->qp);

		ntb_netdev_queue_rx_drain(queue);
	}

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}
465 
/* net_device callbacks; MAC changes are allowed live (see
 * IFF_LIVE_ADDR_CHANGE set in probe) via the generic eth_mac_addr.
 */
static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};
473 
/* ethtool get_drvinfo: report driver name, version and the underlying
 * NTB device's PCI bus id.
 */
static void ntb_get_drvinfo(struct net_device *ndev,
			    struct ethtool_drvinfo *info)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strscpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
	strscpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}
483 
/* ethtool get_link_ksettings: there is no PHY behind an NTB link, so
 * advertise a generic full-duplex backplane port with unknown speed.
 */
static int ntb_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);

	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}
500 
/* ethtool get_channels: current count is the number of live QPs; the
 * maximum is what the netdev was allocated with in probe
 * (NTB_NETDEV_MAX_QUEUES).
 */
static void ntb_get_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	channels->combined_count = dev->num_queues;
	channels->max_combined = ndev->num_tx_queues;
}
509 
/* Grow the number of queue pairs from @old to @new (ethtool -L path).
 *
 * Each new QP is created and, when the interface is running, given a tx
 * timer and primed with rx buffers; its subqueue stays stopped until a
 * link-up event for that QP arrives. The published queue count changes
 * only after everything succeeds. On any failure every partially
 * created QP is rolled back and the previous count is preserved.
 */
static int ntb_inc_channels(struct net_device *ndev,
			    unsigned int old, unsigned int new)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	bool running = netif_running(ndev);
	struct ntb_netdev_queue *queue;
	unsigned int q, created;
	int rc;

	created = old;
	for (q = old; q < new; q++) {
		queue = &dev->queues[q];

		queue->ntdev = dev;
		queue->qid = q;
		queue->qp = ntb_transport_create_queue(queue, dev->client_dev,
						       &ntb_netdev_handlers);
		if (!queue->qp) {
			rc = -ENOSPC;
			goto err_new;
		}
		created++;

		if (!running)
			continue;

		timer_setup(&queue->tx_timer, ntb_netdev_tx_timer, 0);

		rc = ntb_netdev_queue_rx_fill(ndev, queue);
		if (rc)
			goto err_new;

		/*
		 * Carrier may already be on due to other QPs. Keep the new
		 * subqueue stopped until we get a Link Up event for this QP.
		 */
		netif_stop_subqueue(ndev, q);
	}

	rc = netif_set_real_num_queues(ndev, new, new);
	if (rc)
		goto err_new;

	dev->num_queues = new;

	if (running)
		for (q = old; q < new; q++)
			ntb_transport_link_up(dev->queues[q].qp);

	return 0;

err_new:
	/* Unwind in reverse: quiesce running QPs first, then free them all */
	if (running) {
		unsigned int rollback = created;

		while (rollback-- > old) {
			queue = &dev->queues[rollback];
			ntb_transport_link_down(queue->qp);
			ntb_netdev_queue_rx_drain(queue);
			timer_delete_sync(&queue->tx_timer);
		}
	}
	while (created-- > old) {
		queue = &dev->queues[created];
		ntb_transport_free_queue(queue->qp);
		queue->qp = NULL;
	}
	return rc;
}
579 
/* Shrink the number of queue pairs from @old to @new (ethtool -L path).
 *
 * The removed subqueues are stopped and the queue count is published
 * before the QPs behind them are torn down and freed; the carrier is
 * then recomputed in case only removed QPs were up. If the netdev
 * refuses the new queue count, the stopped subqueues are re-woken and
 * nothing else changes.
 */
static int ntb_dec_channels(struct net_device *ndev,
			    unsigned int old, unsigned int new)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	bool running = netif_running(ndev);
	struct ntb_netdev_queue *queue;
	unsigned int q;
	int rc;

	if (running)
		for (q = new; q < old; q++)
			netif_stop_subqueue(ndev, q);

	rc = netif_set_real_num_queues(ndev, new, new);
	if (rc)
		goto err;

	/* Publish new queue count before invalidating QP pointers */
	dev->num_queues = new;

	for (q = new; q < old; q++) {
		queue = &dev->queues[q];

		if (running) {
			ntb_transport_link_down(queue->qp);
			ntb_netdev_queue_rx_drain(queue);
			timer_delete_sync(&queue->tx_timer);
		}

		ntb_transport_free_queue(queue->qp);
		queue->qp = NULL;
	}

	/*
	 * It might be the case that the removed queues are the only queues that
	 * were up, so see if the global carrier needs to change.
	 */
	ntb_netdev_update_carrier(dev);
	return 0;

err:
	if (running) {
		for (q = new; q < old; q++)
			netif_wake_subqueue(ndev, q);
	}
	return rc;
}
627 
628 static int ntb_set_channels(struct net_device *ndev,
629 			    struct ethtool_channels *channels)
630 {
631 	struct ntb_netdev *dev = netdev_priv(ndev);
632 	unsigned int new = channels->combined_count;
633 	unsigned int old = dev->num_queues;
634 
635 	if (new == old)
636 		return 0;
637 
638 	if (new < old)
639 		return ntb_dec_channels(ndev, old, new);
640 	else
641 		return ntb_inc_channels(ndev, old, new);
642 }
643 
/* ethtool callbacks; get_link reports the carrier state maintained by
 * ntb_netdev_update_carrier().
 */
static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = ntb_get_link_ksettings,
	.get_channels = ntb_get_channels,
	.set_channels = ntb_set_channels,
};
651 
/* NTB transport client probe: allocate a multiqueue Ethernet device
 * sized for NTB_NETDEV_MAX_QUEUES, create the default queue pairs,
 * assign a random MAC and register the netdev. Extra QPs can be added
 * later through ethtool -L (ntb_set_channels).
 */
static int ntb_netdev_probe(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	unsigned int q;
	int rc;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;
	if (!pdev)
		return -ENODEV;

	ndev = alloc_etherdev_mq(sizeof(*dev), NTB_NETDEV_MAX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, client_dev);

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->pdev = pdev;
	dev->client_dev = client_dev;
	dev->num_queues = 0;

	/* Allocate queue state for the maximum up front so growing the
	 * channel count never needs to reallocate this array.
	 */
	dev->queues = kzalloc_objs(*dev->queues, NTB_NETDEV_MAX_QUEUES,
				   GFP_KERNEL);
	if (!dev->queues) {
		rc = -ENOMEM;
		goto err_free_netdev;
	}

	ndev->features = NETIF_F_HIGHDMA;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	ndev->hw_features = ndev->features;
	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

	/* No hardware MAC exists behind NTB; generate a random one */
	eth_random_addr(ndev->perm_addr);
	dev_addr_set(ndev, ndev->perm_addr);

	ndev->netdev_ops = &ntb_netdev_ops;
	ndev->ethtool_ops = &ntb_ethtool_ops;

	ndev->min_mtu = 0;
	ndev->max_mtu = ETH_MAX_MTU;

	for (q = 0; q < NTB_NETDEV_DEFAULT_QUEUES; q++) {
		struct ntb_netdev_queue *queue = &dev->queues[q];

		queue->ntdev = dev;
		queue->qid = q;
		queue->qp = ntb_transport_create_queue(queue, client_dev,
						       &ntb_netdev_handlers);
		if (!queue->qp)
			break;

		dev->num_queues++;
	}

	/* At least one QP is required for a usable device */
	if (!dev->num_queues) {
		rc = -EIO;
		goto err_free_queues;
	}

	rc = netif_set_real_num_queues(ndev, dev->num_queues, dev->num_queues);
	if (rc)
		goto err_free_qps;

	/* Default MTU: the largest payload one transport entry can carry */
	ndev->mtu = ntb_transport_max_size(dev->queues[0].qp) - ETH_HLEN;

	rc = register_netdev(ndev);
	if (rc)
		goto err_free_qps;

	dev_set_drvdata(client_dev, ndev);
	dev_info(&pdev->dev, "%s created with %u queue pairs\n",
		 ndev->name, dev->num_queues);
	return 0;

err_free_qps:
	for (q = 0; q < dev->num_queues; q++)
		ntb_transport_free_queue(dev->queues[q].qp);

err_free_queues:
	kfree(dev->queues);

err_free_netdev:
	free_netdev(ndev);
	return rc;
}
745 
746 static void ntb_netdev_remove(struct device *client_dev)
747 {
748 	struct net_device *ndev = dev_get_drvdata(client_dev);
749 	struct ntb_netdev *dev = netdev_priv(ndev);
750 	unsigned int q;
751 
752 	unregister_netdev(ndev);
753 	for (q = 0; q < dev->num_queues; q++)
754 		ntb_transport_free_queue(dev->queues[q].qp);
755 
756 	kfree(dev->queues);
757 	free_netdev(ndev);
758 }
759 
/* Registration record for the NTB transport client bus */
static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};
766 
767 static int __init ntb_netdev_init_module(void)
768 {
769 	int rc;
770 
771 	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
772 	if (rc)
773 		return rc;
774 
775 	rc = ntb_transport_register_client(&ntb_netdev_client);
776 	if (rc) {
777 		ntb_transport_unregister_client_dev(KBUILD_MODNAME);
778 		return rc;
779 	}
780 
781 	return 0;
782 }
783 late_initcall(ntb_netdev_init_module);
784 
/* Module unload: mirror of ntb_netdev_init_module() in reverse order */
static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);
791