/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Network Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>

#define NTB_NETDEV_VER	"0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stopping upper layer tx */
static unsigned int tx_stop = 5;

struct ntb_netdev {
	struct list_head list;
	struct pci_dev *pdev;
	struct net_device *ndev;
	struct ntb_transport_qp *qp;
	struct timer_list tx_timer;
};

#define	NTB_TX_TIMEOUT_MS	1000
#define	NTB_RXQ_SIZE		100

static LIST_HEAD(dev_list);

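/*
 * Transport queue event callback: mirror the NTB transport link state onto
 * the netdev carrier so the stack only transmits while the peer link is up.
 */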
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
	struct net_device *ndev = data;
	struct ntb_netdev *dev = netdev_priv(ndev);

	netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
		   ntb_transport_link_query(dev->qp));

	if (link_is_up) {
		if (ntb_transport_link_query(dev->qp))
			netif_carrier_on(ndev);
	} else {
		netif_carrier_off(ndev);
	}
}

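/*
 * Rx completion callback: pass the filled skb up the stack and post a fresh
 * buffer back to the transport ring; on a length error the same skb is
 * simply re-posted.
 */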
static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	int rc;

	skb = data;
	if (!skb)
		return;

	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);

	if (len < 0) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_length_errors++;
		goto enqueue_again;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (netif_rx(skb) == NET_RX_DROP) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	} else {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}

	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
	if (!skb) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_frame_errors++;
		return;
	}

enqueue_again:
	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
	if (rc) {
		dev_kfree_skb(skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_fifo_errors++;
	}
}

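/*
 * Stop the tx queue, then re-check the free entry count after the barrier:
 * if space freed up in the meantime the queue is restarted right away,
 * otherwise the reaper timer is armed to wake it later.
 */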
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
				      struct ntb_transport_qp *qp, int size)
{
	struct ntb_netdev *dev = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
	 * since the queue was last started.
	 */
	smp_mb();

	if (likely(ntb_transport_tx_free_entry(qp) < size)) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
		return -EBUSY;
	}

	netif_start_queue(netdev);
	return 0;
}

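/* Throttle the upper layer once fewer than @size transport entries remain. */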
static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
				    struct ntb_transport_qp *qp, int size)
{
	if (netif_queue_stopped(ndev) ||
	    (ntb_transport_tx_free_entry(qp) >= size))
		return 0;

	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
}

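/*
 * Tx completion callback: account the transfer, free the skb, and wake the
 * queue once enough descriptors have been reclaimed.
 */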
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	struct ntb_netdev *dev = netdev_priv(ndev);

	skb = data;
	if (!skb || !ndev)
		return;

	if (len > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
		ndev->stats.tx_aborted_errors++;
	}

	dev_kfree_skb(skb);

	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

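/*
 * ndo_start_xmit: hand the skb to the NTB transport and throttle the stack
 * when free tx descriptors run low.
 */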
static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	int rc;

	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
	if (rc)
		goto err;

	/* check for next submit */
	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	return NETDEV_TX_OK;

err:
	ndev->stats.tx_dropped++;
	ndev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}

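/*
 * Tx resource reaper: poll until enough descriptors are free to wake the
 * queue, re-arming the timer otherwise. Note that tx_time is in usecs.
 */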
static void ntb_netdev_tx_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct ntb_netdev *dev = netdev_priv(ndev);

	if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
	} else {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

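/*
 * ndo_open: pre-fill the rx ring, set up the tx reaper timer, and bring the
 * transport link up before enabling the tx queue.
 */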
static int ntb_netdev_open(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int rc, i, len;

	/* Add some empty rx bufs */
	for (i = 0; i < NTB_RXQ_SIZE; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb) {
			rc = -ENOMEM;
			goto err;
		}

		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			dev_kfree_skb(skb);
			goto err;
		}
	}

	setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev);

	netif_carrier_off(ndev);
	ntb_transport_link_up(dev->qp);
	netif_start_queue(ndev);

	return 0;

err:
	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);
	return rc;
}

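/*
 * ndo_stop: take the transport link down, drain the posted rx buffers, and
 * stop the tx reaper timer.
 */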
static int ntb_netdev_close(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len;

	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	del_timer_sync(&dev->tx_timer);

	return 0;
}

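/*
 * ndo_change_mtu: the MTU is bounded by the transport frame size. When the
 * device is running and the MTU grows, the posted rx buffers must be
 * reallocated at the new size, so the link is cycled around the change.
 */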
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len, rc;

	if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
		return -EINVAL;

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	ntb_transport_link_down(dev->qp);

	if (ndev->mtu < new_mtu) {
		int i;

		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
			dev_kfree_skb(skb);

		for (; i; i--) {
			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
			if (!skb) {
				rc = -ENOMEM;
				goto err;
			}

			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
						      new_mtu + ETH_HLEN);
			if (rc) {
				dev_kfree_skb(skb);
				goto err;
			}
		}
	}

	ndev->mtu = new_mtu;

	ntb_transport_link_up(dev->qp);

	return 0;

err:
	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}

static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};

static void ntb_get_drvinfo(struct net_device *ndev,
			    struct ethtool_drvinfo *info)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}

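/*
 * Report fixed "backplane" link settings: there is no PHY behind an NTB, so
 * the speed is unknown and the link is treated as full duplex.
 */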
static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = SUPPORTED_Backplane;
	cmd->advertising = ADVERTISED_Backplane;
	ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_OTHER;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_DUMMY1;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	return 0;
}

static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_settings = ntb_get_settings,
};

static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};

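/*
 * Transport client probe: allocate an ethernet device on top of an NTB
 * transport queue, size the MTU to the transport frame, and register it.
 */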
static int ntb_netdev_probe(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	int rc;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;
	if (!pdev)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(*dev));
	if (!ndev)
		return -ENOMEM;

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->pdev = pdev;
	ndev->features = NETIF_F_HIGHDMA;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	ndev->hw_features = ndev->features;
	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

	random_ether_addr(ndev->perm_addr);
	memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);

	ndev->netdev_ops = &ntb_netdev_ops;
	ndev->ethtool_ops = &ntb_ethtool_ops;

	dev->qp = ntb_transport_create_queue(ndev, client_dev,
					     &ntb_netdev_handlers);
	if (!dev->qp) {
		rc = -EIO;
		goto err;
	}

	ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;

	rc = register_netdev(ndev);
	if (rc)
		goto err1;

	list_add(&dev->list, &dev_list);
	dev_info(&pdev->dev, "%s created\n", ndev->name);
	return 0;

err1:
	ntb_transport_free_queue(dev->qp);
err:
	free_netdev(ndev);
	return rc;
}

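/*
 * Transport client remove: look up the netdev bound to this NTB device's PCI
 * function, then unregister it and release its transport queue.
 */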
static void ntb_netdev_remove(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	bool found = false;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;

	list_for_each_entry(dev, &dev_list, list) {
		if (dev->pdev == pdev) {
			found = true;
			break;
		}
	}
	if (!found)
		return;

	list_del(&dev->list);

	ndev = dev->ndev;

	unregister_netdev(ndev);
	ntb_transport_free_queue(dev->qp);
	free_netdev(ndev);
}

static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};

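/*
 * Register the client device and then the transport client itself so that
 * probe() runs for every NTB transport that comes up.
 */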
static int __init ntb_netdev_init_module(void)
{
	int rc;

	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;
	return ntb_transport_register_client(&ntb_netdev_client);
}
module_init(ntb_netdev_init_module);

static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);