xref: /freebsd/sys/dev/xen/netfront/netfront.c (revision e3514747256465c52c3b2aedc9795f52c0d3efe9)
1 /*-
2  * Copyright (c) 2004-2006 Kip Macy
3  * Copyright (c) 2015 Wei Liu <wei.liu2@citrix.com>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 
34 #include <sys/param.h>
35 #include <sys/sockio.h>
36 #include <sys/limits.h>
37 #include <sys/mbuf.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/kernel.h>
41 #include <sys/socket.h>
42 #include <sys/sysctl.h>
43 #include <sys/taskqueue.h>
44 
45 #include <net/if.h>
46 #include <net/if_var.h>
47 #include <net/if_arp.h>
48 #include <net/ethernet.h>
49 #include <net/if_media.h>
50 #include <net/bpf.h>
51 #include <net/if_types.h>
52 
53 #include <netinet/in.h>
54 #include <netinet/ip.h>
55 #include <netinet/if_ether.h>
56 #include <netinet/tcp.h>
57 #include <netinet/tcp_lro.h>
58 
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 
62 #include <sys/bus.h>
63 
64 #include <xen/xen-os.h>
65 #include <xen/hypervisor.h>
66 #include <xen/xen_intr.h>
67 #include <xen/gnttab.h>
68 #include <xen/interface/memory.h>
69 #include <xen/interface/io/netif.h>
70 #include <xen/xenbus/xenbusvar.h>
71 
72 #include "xenbus_if.h"
73 
74 /* Features supported by all backends.  TSO and LRO can be negotiated */
75 #define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
76 
77 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
78 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
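/*
 * With the standard 4 KiB PAGE_SIZE both macros work out to 256 slots:
 * __RING_SIZE rounds the space left after the shared ring header down
 * to a power-of-two count of request/response union slots.
 */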
79 
80 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
81 
82 /*
83  * Should the driver do LRO on the RX end?
84  *  This can be toggled on the fly, but the
85  *  interface must be reset (down/up) for it
86  *  to take effect.
87  */
88 static int xn_enable_lro = 1;
89 TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
90 
91 /*
92  * Number of pairs of queues.
93  */
94 static unsigned long xn_num_queues = 4;
95 TUNABLE_ULONG("hw.xn.num_queues", &xn_num_queues);
96 
97 /**
98  * \brief The maximum allowed data fragments in a single transmit
99  *        request.
100  *
101  * This limit is imposed by the backend driver.  We assume here that
102  * we are dealing with a Linux driver domain and have set our limit
103  * to mirror the Linux MAX_SKB_FRAGS constant.
104  */
105 #define	MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
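/*
 * Worked example, assuming the common 4 KiB PAGE_SIZE: a maximal 64 KiB
 * TSO payload spans 65536 / 4096 = 16 pages, plus 2 slots of slack for
 * data that straddles a page boundary at either end, giving 18 fragments.
 */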
106 
107 #define RX_COPY_THRESHOLD 256
108 
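/*
 * Compatibility stub for the Linux net_ratelimit() helper; always
 * returning 0 keeps the rate-limited warnings below disabled.
 */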
109 #define net_ratelimit() 0
110 
111 struct netfront_rxq;
112 struct netfront_txq;
113 struct netfront_info;
114 struct netfront_rx_info;
115 
116 static void xn_txeof(struct netfront_txq *);
117 static void xn_rxeof(struct netfront_rxq *);
118 static void xn_alloc_rx_buffers(struct netfront_rxq *);
119 static void xn_alloc_rx_buffers_callout(void *arg);
120 
121 static void xn_release_rx_bufs(struct netfront_rxq *);
122 static void xn_release_tx_bufs(struct netfront_txq *);
123 
124 static void xn_rxq_intr(struct netfront_rxq *);
125 static void xn_txq_intr(struct netfront_txq *);
126 static void xn_intr(void *);
127 static inline int xn_count_frags(struct mbuf *m);
128 static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *);
129 static int xn_ioctl(struct ifnet *, u_long, caddr_t);
130 static void xn_ifinit_locked(struct netfront_info *);
131 static void xn_ifinit(void *);
132 static void xn_stop(struct netfront_info *);
133 static void xn_query_features(struct netfront_info *np);
134 static int xn_configure_features(struct netfront_info *np);
135 static void netif_free(struct netfront_info *info);
136 static int netfront_detach(device_t dev);
137 
138 static int xn_txq_mq_start_locked(struct netfront_txq *, struct mbuf *);
139 static int xn_txq_mq_start(struct ifnet *, struct mbuf *);
140 
141 static int talk_to_backend(device_t dev, struct netfront_info *info);
142 static int create_netdev(device_t dev);
143 static void netif_disconnect_backend(struct netfront_info *info);
144 static int setup_device(device_t dev, struct netfront_info *info,
145     unsigned long);
146 static int xn_ifmedia_upd(struct ifnet *ifp);
147 static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
148 
149 static int xn_connect(struct netfront_info *);
150 static void xn_kick_rings(struct netfront_info *);
151 
152 static int xn_get_responses(struct netfront_rxq *,
153     struct netfront_rx_info *, RING_IDX, RING_IDX *,
154     struct mbuf **);
155 
156 #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)
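/*
 * Holdover naming from the Xenolinux sources: for auto-translated
 * (HVM/PVH) guests the value computed here is really the guest
 * physical frame number.
 */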
157 
158 #define INVALID_P2M_ENTRY (~0UL)
159 #define XN_QUEUE_NAME_LEN  8	/* xn{t,r}x_%u, allow for two digits */
160 struct netfront_rxq {
161 	struct netfront_info 	*info;
162 	u_int			id;
163 	char			name[XN_QUEUE_NAME_LEN];
164 	struct mtx		lock;
165 
166 	int			ring_ref;
167 	netif_rx_front_ring_t 	ring;
168 	xen_intr_handle_t	xen_intr_handle;
169 
170 	grant_ref_t 		gref_head;
171 	grant_ref_t 		grant_ref[NET_RX_RING_SIZE + 1];
172 
173 	struct mbuf		*mbufs[NET_RX_RING_SIZE + 1];
174 
175 	struct lro_ctrl		lro;
176 
177 	struct callout		rx_refill;
178 };
179 
180 struct netfront_txq {
181 	struct netfront_info 	*info;
182 	u_int 			id;
183 	char			name[XN_QUEUE_NAME_LEN];
184 	struct mtx		lock;
185 
186 	int			ring_ref;
187 	netif_tx_front_ring_t	ring;
188 	xen_intr_handle_t 	xen_intr_handle;
189 
190 	grant_ref_t		gref_head;
191 	grant_ref_t		grant_ref[NET_TX_RING_SIZE + 1];
192 
193 	struct mbuf		*mbufs[NET_TX_RING_SIZE + 1];
194 	int			mbufs_cnt;
195 	struct buf_ring		*br;
196 
197 	struct taskqueue 	*tq;
198 	struct task       	defrtask;
199 
200 	bool			full;
201 };
202 
203 struct netfront_info {
204 	struct ifnet 		*xn_ifp;
205 
206 	struct mtx   		sc_lock;
207 
208 	u_int  num_queues;
209 	struct netfront_rxq 	*rxq;
210 	struct netfront_txq 	*txq;
211 
212 	u_int			carrier;
213 	u_int			maxfrags;
214 
215 	device_t		xbdev;
216 	uint8_t			mac[ETHER_ADDR_LEN];
217 
218 	int			xn_if_flags;
219 
220 	struct ifmedia		sc_media;
221 
222 	bool			xn_reset;
223 };
224 
225 struct netfront_rx_info {
226 	struct netif_rx_response rx;
227 	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
228 };
229 
230 #define XN_RX_LOCK(_q)         mtx_lock(&(_q)->lock)
231 #define XN_RX_UNLOCK(_q)       mtx_unlock(&(_q)->lock)
232 
233 #define XN_TX_LOCK(_q)         mtx_lock(&(_q)->lock)
234 #define XN_TX_TRYLOCK(_q)      mtx_trylock(&(_q)->lock)
235 #define XN_TX_UNLOCK(_q)       mtx_unlock(&(_q)->lock)
236 
237 #define XN_LOCK(_sc)           mtx_lock(&(_sc)->sc_lock)
238 #define XN_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_lock)
239 
240 #define XN_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->sc_lock, MA_OWNED)
241 #define XN_RX_LOCK_ASSERT(_q)  mtx_assert(&(_q)->lock, MA_OWNED)
242 #define XN_TX_LOCK_ASSERT(_q)  mtx_assert(&(_q)->lock, MA_OWNED)
243 
244 #define netfront_carrier_on(netif)	((netif)->carrier = 1)
245 #define netfront_carrier_off(netif)	((netif)->carrier = 0)
246 #define netfront_carrier_ok(netif)	((netif)->carrier)
247 
248 /* Helpers for acquiring and freeing slots in the tx mbufs[] free list. */
249 
250 static inline void
251 add_id_to_freelist(struct mbuf **list, uintptr_t id)
252 {
253 
254 	KASSERT(id != 0,
255 		("%s: the head item (0) must always be free.", __func__));
256 	list[id] = list[0];
257 	list[0]  = (struct mbuf *)id;
258 }
259 
260 static inline unsigned short
261 get_id_from_freelist(struct mbuf **list)
262 {
263 	uintptr_t id;
264 
265 	id = (uintptr_t)list[0];
266 	KASSERT(id != 0,
267 		("%s: the head item (0) must always remain free.", __func__));
268 	list[0] = list[id];
269 	return (id);
270 }
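
/*
 * Illustrative note on the encoding used above: the tx mbufs[] array
 * doubles as a linked free list.  Slot 0 is the list head, and any
 * "pointer" whose value is <= NET_TX_RING_SIZE is really the index of
 * the next free slot rather than a kernel address.  Starting from a
 * fresh list { [0]=1, [1]=2, [2]=3, ... }, get_id_from_freelist()
 * hands out id 1 and leaves { [0]=2, ... }; add_id_to_freelist(list, 1)
 * pushes it back onto the head.
 */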
271 
272 static inline int
273 xn_rxidx(RING_IDX idx)
274 {
275 
276 	return (idx & (NET_RX_RING_SIZE - 1));
277 }
278 
279 static inline struct mbuf *
280 xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri)
281 {
282 	int i;
283 	struct mbuf *m;
284 
285 	i = xn_rxidx(ri);
286 	m = rxq->mbufs[i];
287 	rxq->mbufs[i] = NULL;
288 	return (m);
289 }
290 
291 static inline grant_ref_t
292 xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri)
293 {
294 	int i = xn_rxidx(ri);
295 	grant_ref_t ref = rxq->grant_ref[i];
296 
297 	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
298 	rxq->grant_ref[i] = GRANT_REF_INVALID;
299 	return (ref);
300 }
301 
302 #define IPRINTK(fmt, args...) \
303     printf("[XEN] " fmt, ##args)
304 #ifdef INVARIANTS
305 #define WPRINTK(fmt, args...) \
306     printf("[XEN] " fmt, ##args)
307 #else
308 #define WPRINTK(fmt, args...)
309 #endif
310 #ifdef DEBUG
311 #define DPRINTK(fmt, args...) \
312     printf("[XEN] %s: " fmt, __func__, ##args)
313 #else
314 #define DPRINTK(fmt, args...)
315 #endif
316 
317 /**
318  * Read the 'mac' node at the given device's node in the store, and parse that
319  * as colon-separated octets, placing the result in the given mac array.
320  * mac must be a preallocated array of length ETHER_ADDR_LEN.
321  * Return 0 on success, or errno on error.
322  */
323 static int
324 xen_net_read_mac(device_t dev, uint8_t mac[])
325 {
326 	int error, i;
327 	char *s, *e, *macstr;
328 	const char *path;
329 
330 	path = xenbus_get_node(dev);
331 	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
332 	if (error == ENOENT) {
333 		/*
334 		 * Deal with missing mac XenStore nodes on devices with
335 		 * HVM emulation (the 'ioemu' configuration attribute)
336 		 * enabled.
337 		 *
338 		 * The HVM emulator may execute in a stub device model
339 		 * domain which lacks the permission, only given to Dom0,
340 		 * to update the guest's XenStore tree.  For this reason,
341 		 * the HVM emulator doesn't even attempt to write the
342 		 * front-side mac node, even when operating in Dom0.
343 		 * However, there should always be a mac listed in the
344 		 * backend tree.  Fall back to this version if our query
345 		 * of the front side XenStore location doesn't find
346 		 * anything.
347 		 */
348 		path = xenbus_get_otherend_path(dev);
349 		error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
350 	}
351 	if (error != 0) {
352 		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
353 		return (error);
354 	}
355 
356 	s = macstr;
357 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
358 		mac[i] = strtoul(s, &e, 16);
359 		if (s == e || (e[0] != ':' && e[0] != 0)) {
360 			free(macstr, M_XENBUS);
361 			return (ENOENT);
362 		}
363 		s = &e[1];
364 	}
365 	free(macstr, M_XENBUS);
366 	return (0);
367 }
368 
369 /**
370  * Entry point to this code when a new device is created.  Allocate the basic
371  * structures and the ring buffers for communication with the backend, and
372  * inform the backend of the appropriate details for those.  Switch to
373  * Connected state.
374  */
375 static int
376 netfront_probe(device_t dev)
377 {
378 
379 	if (xen_hvm_domain() && xen_disable_pv_nics != 0)
380 		return (ENXIO);
381 
382 	if (!strcmp(xenbus_get_type(dev), "vif")) {
383 		device_set_desc(dev, "Virtual Network Interface");
384 		return (0);
385 	}
386 
387 	return (ENXIO);
388 }
389 
390 static int
391 netfront_attach(device_t dev)
392 {
393 	int err;
394 
395 	err = create_netdev(dev);
396 	if (err != 0) {
397 		xenbus_dev_fatal(dev, err, "creating netdev");
398 		return (err);
399 	}
400 
401 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
402 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
403 	    OID_AUTO, "enable_lro", CTLFLAG_RW,
404 	    &xn_enable_lro, 0, "Large Receive Offload");
405 
406 	SYSCTL_ADD_ULONG(device_get_sysctl_ctx(dev),
407 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
408 	    OID_AUTO, "num_queues", CTLFLAG_RD,
409 	    &xn_num_queues, "Number of pairs of queues");
410 
411 	return (0);
412 }
413 
414 static int
415 netfront_suspend(device_t dev)
416 {
417 	struct netfront_info *np = device_get_softc(dev);
418 	u_int i;
419 
420 	for (i = 0; i < np->num_queues; i++) {
421 		XN_RX_LOCK(&np->rxq[i]);
422 		XN_TX_LOCK(&np->txq[i]);
423 	}
424 	netfront_carrier_off(np);
425 	for (i = 0; i < np->num_queues; i++) {
426 		XN_RX_UNLOCK(&np->rxq[i]);
427 		XN_TX_UNLOCK(&np->txq[i]);
428 	}
429 	return (0);
430 }
431 
432 /**
433  * We are reconnecting to the backend, due to a suspend/resume, or a backend
434  * driver restart.  We tear down our netif structure and recreate it, but
435  * leave the device-layer structures intact so that this is transparent to the
436  * rest of the kernel.
437  */
438 static int
439 netfront_resume(device_t dev)
440 {
441 	struct netfront_info *info = device_get_softc(dev);
442 	u_int i;
443 
444 	if (xen_suspend_cancelled) {
445 		for (i = 0; i < info->num_queues; i++) {
446 			XN_RX_LOCK(&info->rxq[i]);
447 			XN_TX_LOCK(&info->txq[i]);
448 		}
449 		netfront_carrier_on(info);
450 		for (i = 0; i < info->num_queues; i++) {
451 			XN_RX_UNLOCK(&info->rxq[i]);
452 			XN_TX_UNLOCK(&info->txq[i]);
453 		}
454 		return (0);
455 	}
456 
457 	netif_disconnect_backend(info);
458 	return (0);
459 }
460 
461 static int
462 write_queue_xenstore_keys(device_t dev,
463     struct netfront_rxq *rxq,
464     struct netfront_txq *txq,
465     struct xs_transaction *xst, bool hierarchy)
466 {
467 	int err;
468 	const char *message;
469 	const char *node = xenbus_get_node(dev);
470 	char *path;
471 	size_t path_size;
472 
473 	KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids"));
474 	/* Split event channel support is not yet there. */
475 	KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle,
476 	    ("Split event channels are not supported"));
477 
478 	if (hierarchy) {
479 		path_size = strlen(node) + 10;
480 		path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
481 		snprintf(path, path_size, "%s/queue-%u", node, rxq->id);
482 	} else {
483 		path_size = strlen(node) + 1;
484 		path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO);
485 		snprintf(path, path_size, "%s", node);
486 	}
487 
488 	err = xs_printf(*xst, path, "tx-ring-ref", "%u", txq->ring_ref);
489 	if (err != 0) {
490 		message = "writing tx ring-ref";
491 		goto error;
492 	}
493 	err = xs_printf(*xst, path, "rx-ring-ref", "%u", rxq->ring_ref);
494 	if (err != 0) {
495 		message = "writing rx ring-ref";
496 		goto error;
497 	}
498 	err = xs_printf(*xst, path, "event-channel", "%u",
499 	    xen_intr_port(rxq->xen_intr_handle));
500 	if (err != 0) {
501 		message = "writing event-channel";
502 		goto error;
503 	}
504 
505 	free(path, M_DEVBUF);
506 
507 	return (0);
508 
509 error:
510 	free(path, M_DEVBUF);
511 	xenbus_dev_fatal(dev, err, "%s", message);
512 
513 	return (err);
514 }
515 
516 /* Common code used when first setting up, and when resuming. */
517 static int
518 talk_to_backend(device_t dev, struct netfront_info *info)
519 {
520 	const char *message;
521 	struct xs_transaction xst;
522 	const char *node = xenbus_get_node(dev);
523 	int err;
524 	unsigned long num_queues, max_queues = 0;
525 	unsigned int i;
526 
527 	err = xen_net_read_mac(dev, info->mac);
528 	if (err != 0) {
529 		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
530 		goto out;
531 	}
532 
533 	err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev),
534 	    "multi-queue-max-queues", NULL, "%lu", &max_queues);
535 	if (err != 0)
536 		max_queues = 1;
537 	num_queues = xn_num_queues;
538 	if (num_queues > max_queues)
539 		num_queues = max_queues;
540 
541 	err = setup_device(dev, info, num_queues);
542 	if (err != 0)
543 		goto out;
544 
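	/*
	 * XenStore transactions can race with concurrent writers and fail
	 * with EAGAIN at commit time; in that case the whole transaction
	 * is retried from this label.
	 */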
545  again:
546 	err = xs_transaction_start(&xst);
547 	if (err != 0) {
548 		xenbus_dev_fatal(dev, err, "starting transaction");
549 		goto free;
550 	}
551 
552 	if (info->num_queues == 1) {
553 		err = write_queue_xenstore_keys(dev, &info->rxq[0],
554 		    &info->txq[0], &xst, false);
555 		if (err != 0)
556 			goto abort_transaction_no_def_error;
557 	} else {
558 		err = xs_printf(xst, node, "multi-queue-num-queues",
559 		    "%u", info->num_queues);
560 		if (err != 0) {
561 			message = "writing multi-queue-num-queues";
562 			goto abort_transaction;
563 		}
564 
565 		for (i = 0; i < info->num_queues; i++) {
566 			err = write_queue_xenstore_keys(dev, &info->rxq[i],
567 			    &info->txq[i], &xst, true);
568 			if (err != 0)
569 				goto abort_transaction_no_def_error;
570 		}
571 	}
572 
573 	err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
574 	if (err != 0) {
575 		message = "writing request-rx-copy";
576 		goto abort_transaction;
577 	}
578 	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
579 	if (err != 0) {
580 		message = "writing feature-rx-notify";
581 		goto abort_transaction;
582 	}
583 	err = xs_printf(xst, node, "feature-sg", "%d", 1);
584 	if (err != 0) {
585 		message = "writing feature-sg";
586 		goto abort_transaction;
587 	}
588 	if ((info->xn_ifp->if_capenable & IFCAP_LRO) != 0) {
589 		err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
590 		if (err != 0) {
591 			message = "writing feature-gso-tcpv4";
592 			goto abort_transaction;
593 		}
594 	}
595 	if ((info->xn_ifp->if_capenable & IFCAP_RXCSUM) == 0) {
596 		err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1);
597 		if (err != 0) {
598 			message = "writing feature-no-csum-offload";
599 			goto abort_transaction;
600 		}
601 	}
602 
603 	err = xs_transaction_end(xst, 0);
604 	if (err != 0) {
605 		if (err == EAGAIN)
606 			goto again;
607 		xenbus_dev_fatal(dev, err, "completing transaction");
608 		goto free;
609 	}
610 
611 	return (0);
612 
613  abort_transaction:
614 	xenbus_dev_fatal(dev, err, "%s", message);
615  abort_transaction_no_def_error:
616 	xs_transaction_end(xst, 1);
617  free:
618 	netif_free(info);
619  out:
620 	return (err);
621 }
622 
623 static void
624 xn_rxq_intr(struct netfront_rxq *rxq)
625 {
626 
627 	XN_RX_LOCK(rxq);
628 	xn_rxeof(rxq);
629 	XN_RX_UNLOCK(rxq);
630 }
631 
632 static void
633 xn_txq_start(struct netfront_txq *txq)
634 {
635 	struct netfront_info *np = txq->info;
636 	struct ifnet *ifp = np->xn_ifp;
637 
638 	XN_TX_LOCK_ASSERT(txq);
639 	if (!drbr_empty(ifp, txq->br))
640 		xn_txq_mq_start_locked(txq, NULL);
641 }
642 
643 static void
644 xn_txq_intr(struct netfront_txq *txq)
645 {
646 
647 	XN_TX_LOCK(txq);
648 	if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring))
649 		xn_txeof(txq);
650 	xn_txq_start(txq);
651 	XN_TX_UNLOCK(txq);
652 }
653 
654 static void
655 xn_txq_tq_deferred(void *xtxq, int pending)
656 {
657 	struct netfront_txq *txq = xtxq;
658 
659 	XN_TX_LOCK(txq);
660 	xn_txq_start(txq);
661 	XN_TX_UNLOCK(txq);
662 }
663 
664 static void
665 disconnect_rxq(struct netfront_rxq *rxq)
666 {
667 
668 	xn_release_rx_bufs(rxq);
669 	gnttab_free_grant_references(rxq->gref_head);
670 	gnttab_end_foreign_access(rxq->ring_ref, NULL);
671 	/*
672 	 * No split event channel support at the moment; the handle will
673 	 * be unbound in the tx path, so there is no need to call
674 	 * xen_intr_unbind here, but we do want to reset the handle to 0.
675 	 */
676 	rxq->xen_intr_handle = 0;
677 }
678 
679 static void
680 destroy_rxq(struct netfront_rxq *rxq)
681 {
682 
683 	callout_drain(&rxq->rx_refill);
684 	free(rxq->ring.sring, M_DEVBUF);
685 }
686 
687 static void
688 destroy_rxqs(struct netfront_info *np)
689 {
690 	int i;
691 
692 	for (i = 0; i < np->num_queues; i++)
693 		destroy_rxq(&np->rxq[i]);
694 
695 	free(np->rxq, M_DEVBUF);
696 	np->rxq = NULL;
697 }
698 
699 static int
700 setup_rxqs(device_t dev, struct netfront_info *info,
701 	   unsigned long num_queues)
702 {
703 	int q, i;
704 	int error;
705 	netif_rx_sring_t *rxs;
706 	struct netfront_rxq *rxq;
707 
708 	info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues,
709 	    M_DEVBUF, M_WAITOK|M_ZERO);
710 
711 	for (q = 0; q < num_queues; q++) {
712 		rxq = &info->rxq[q];
713 
714 		rxq->id = q;
715 		rxq->info = info;
716 		rxq->ring_ref = GRANT_REF_INVALID;
717 		rxq->ring.sring = NULL;
718 		snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q);
719 		mtx_init(&rxq->lock, rxq->name, "netfront receive lock",
720 		    MTX_DEF);
721 
722 		for (i = 0; i <= NET_RX_RING_SIZE; i++) {
723 			rxq->mbufs[i] = NULL;
724 			rxq->grant_ref[i] = GRANT_REF_INVALID;
725 		}
726 
727 		/* Start resource allocation. */
728 
729 		if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
730 		    &rxq->gref_head) != 0) {
731 			device_printf(dev, "failed to allocate rx grant refs\n");
732 			error = ENOMEM;
733 			goto fail;
734 		}
735 
736 		rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
737 		    M_WAITOK|M_ZERO);
738 		SHARED_RING_INIT(rxs);
739 		FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE);
740 
741 		error = xenbus_grant_ring(dev, virt_to_mfn(rxs),
742 		    &rxq->ring_ref);
743 		if (error != 0) {
744 			device_printf(dev, "failed to grant rx ring\n");
745 			goto fail_grant_ring;
746 		}
747 
748 		callout_init(&rxq->rx_refill, 1);
749 	}
750 
751 	return (0);
752 
753 fail_grant_ring:
754 	gnttab_free_grant_references(rxq->gref_head);
755 	free(rxq->ring.sring, M_DEVBUF);
756 fail:
757 	for (; q >= 0; q--) {
758 		disconnect_rxq(&info->rxq[q]);
759 		destroy_rxq(&info->rxq[q]);
760 	}
761 
762 	free(info->rxq, M_DEVBUF);
763 	return (error);
764 }
765 
766 static void
767 disconnect_txq(struct netfront_txq *txq)
768 {
769 
770 	xn_release_tx_bufs(txq);
771 	gnttab_free_grant_references(txq->gref_head);
772 	gnttab_end_foreign_access(txq->ring_ref, NULL);
773 	xen_intr_unbind(&txq->xen_intr_handle);
774 }
775 
776 static void
777 destroy_txq(struct netfront_txq *txq)
778 {
779 
780 	free(txq->ring.sring, M_DEVBUF);
781 	buf_ring_free(txq->br, M_DEVBUF);
782 	taskqueue_drain_all(txq->tq);
783 	taskqueue_free(txq->tq);
784 }
785 
786 static void
787 destroy_txqs(struct netfront_info *np)
788 {
789 	int i;
790 
791 	for (i = 0; i < np->num_queues; i++)
792 		destroy_txq(&np->txq[i]);
793 
794 	free(np->txq, M_DEVBUF);
795 	np->txq = NULL;
796 }
797 
798 static int
799 setup_txqs(device_t dev, struct netfront_info *info,
800 	   unsigned long num_queues)
801 {
802 	int q, i;
803 	int error;
804 	netif_tx_sring_t *txs;
805 	struct netfront_txq *txq;
806 
807 	info->txq = malloc(sizeof(struct netfront_txq) * num_queues,
808 	    M_DEVBUF, M_WAITOK|M_ZERO);
809 
810 	for (q = 0; q < num_queues; q++) {
811 		txq = &info->txq[q];
812 
813 		txq->id = q;
814 		txq->info = info;
815 
816 		txq->ring_ref = GRANT_REF_INVALID;
817 		txq->ring.sring = NULL;
818 
819 		snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q);
820 
821 		mtx_init(&txq->lock, txq->name, "netfront transmit lock",
822 		    MTX_DEF);
823 
824 		for (i = 0; i <= NET_TX_RING_SIZE; i++) {
825 			txq->mbufs[i] = (void *)((u_long)i + 1);
826 			txq->grant_ref[i] = GRANT_REF_INVALID;
827 		}
828 		txq->mbufs[NET_TX_RING_SIZE] = (void *)0;
829 
830 		/* Start resource allocation. */
831 
832 		if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
833 		    &txq->gref_head) != 0) {
834 			device_printf(dev, "failed to allocate tx grant refs\n");
835 			error = ENOMEM;
836 			goto fail;
837 		}
838 
839 		txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
840 		    M_WAITOK|M_ZERO);
841 		SHARED_RING_INIT(txs);
842 		FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE);
843 
844 		error = xenbus_grant_ring(dev, virt_to_mfn(txs),
845 		    &txq->ring_ref);
846 		if (error != 0) {
847 			device_printf(dev, "failed to grant tx ring\n");
848 			goto fail_grant_ring;
849 		}
850 
851 		txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF,
852 		    M_WAITOK, &txq->lock);
853 		TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq);
854 
855 		txq->tq = taskqueue_create(txq->name, M_WAITOK,
856 		    taskqueue_thread_enqueue, &txq->tq);
857 
858 		error = taskqueue_start_threads(&txq->tq, 1, PI_NET,
859 		    "%s txq %d", device_get_nameunit(dev), txq->id);
860 		if (error != 0) {
861 			device_printf(dev, "failed to start tx taskq %d\n",
862 			    txq->id);
863 			goto fail_start_thread;
864 		}
865 
866 		error = xen_intr_alloc_and_bind_local_port(dev,
867 		    xenbus_get_otherend_id(dev), /* filter */ NULL, xn_intr,
868 		    &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
869 		    &txq->xen_intr_handle);
870 
871 		if (error != 0) {
872 			device_printf(dev, "xen_intr_alloc_and_bind_local_port failed\n");
873 			goto fail_bind_port;
874 		}
875 	}
876 
877 	return (0);
878 
879 fail_bind_port:
880 	taskqueue_drain_all(txq->tq);
881 fail_start_thread:
882 	buf_ring_free(txq->br, M_DEVBUF);
883 	taskqueue_free(txq->tq);
884 	gnttab_end_foreign_access(txq->ring_ref, NULL);
885 fail_grant_ring:
886 	gnttab_free_grant_references(txq->gref_head);
887 	free(txq->ring.sring, M_DEVBUF);
888 fail:
889 	for (; q >= 0; q--) {
890 		disconnect_txq(&info->txq[q]);
891 		destroy_txq(&info->txq[q]);
892 	}
893 
894 	free(info->txq, M_DEVBUF);
895 	return (error);
896 }
897 
898 static int
899 setup_device(device_t dev, struct netfront_info *info,
900     unsigned long num_queues)
901 {
902 	int error;
903 	int q;
904 
905 	if (info->txq)
906 		destroy_txqs(info);
907 
908 	if (info->rxq)
909 		destroy_rxqs(info);
910 
911 	info->num_queues = 0;
912 
913 	error = setup_rxqs(dev, info, num_queues);
914 	if (error != 0)
915 		goto out;
916 	error = setup_txqs(dev, info, num_queues);
917 	if (error != 0)
918 		goto out;
919 
920 	info->num_queues = num_queues;
921 
922 	/* No split event channel at the moment. */
923 	for (q = 0; q < num_queues; q++)
924 		info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle;
925 
926 	return (0);
927 
928 out:
929 	KASSERT(error != 0, ("Error path taken without providing an error code"));
930 	return (error);
931 }
932 
933 #ifdef INET
934 /**
935  * If this interface has an ipv4 address, send an arp for it. This
936  * helps to get the network going again after migrating hosts.
937  */
938 static void
939 netfront_send_fake_arp(device_t dev, struct netfront_info *info)
940 {
941 	struct ifnet *ifp;
942 	struct ifaddr *ifa;
943 
944 	ifp = info->xn_ifp;
945 	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
946 		if (ifa->ifa_addr->sa_family == AF_INET) {
947 			arp_ifinit(ifp, ifa);
948 		}
949 	}
950 }
951 #endif
952 
953 /**
954  * Callback received when the backend's state changes.
955  */
956 static void
957 netfront_backend_changed(device_t dev, XenbusState newstate)
958 {
959 	struct netfront_info *sc = device_get_softc(dev);
960 
961 	DPRINTK("newstate=%d\n", newstate);
962 
963 	switch (newstate) {
964 	case XenbusStateInitialising:
965 	case XenbusStateInitialised:
966 	case XenbusStateUnknown:
967 	case XenbusStateReconfigured:
968 	case XenbusStateReconfiguring:
969 		break;
970 	case XenbusStateInitWait:
971 		if (xenbus_get_state(dev) != XenbusStateInitialising)
972 			break;
973 		if (xn_connect(sc) != 0)
974 			break;
975 		/* Switch to connected state before kicking the rings. */
976 		xenbus_set_state(sc->xbdev, XenbusStateConnected);
977 		xn_kick_rings(sc);
978 		break;
979 	case XenbusStateClosing:
980 		xenbus_set_state(dev, XenbusStateClosed);
981 		break;
982 	case XenbusStateClosed:
983 		if (sc->xn_reset) {
984 			netif_disconnect_backend(sc);
985 			xenbus_set_state(dev, XenbusStateInitialising);
986 			sc->xn_reset = false;
987 		}
988 		break;
989 	case XenbusStateConnected:
990 #ifdef INET
991 		netfront_send_fake_arp(dev, sc);
992 #endif
993 		break;
994 	}
995 }
996 
997 /**
998  * \brief Verify that there is sufficient space in the Tx ring
999  *        buffer for a maximally sized request to be enqueued.
1000  *
1001  * A transmit request requires a transmit descriptor for each packet
1002  * fragment, plus up to 2 entries for "options" (e.g. TSO).
1003  */
1004 static inline int
1005 xn_tx_slot_available(struct netfront_txq *txq)
1006 {
1007 
1008 	return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2));
1009 }
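
/*
 * With 4 KiB pages the check above insists on more than 18 + 2 == 20
 * free descriptors (a maximally fragmented packet plus the two
 * "option" slots) before reporting space available.
 */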
1010 
1011 static void
1012 xn_release_tx_bufs(struct netfront_txq *txq)
1013 {
1014 	int i;
1015 
1016 	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
1017 		struct mbuf *m;
1018 
1019 		m = txq->mbufs[i];
1020 
1021 		/*
1022 		 * We assume that no kernel addresses are
1023 		 * less than NET_TX_RING_SIZE.  Any entry
1024 		 * in the table that is below this number
1025 		 * must be an index from free-list tracking.
1026 		 */
1027 		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
1028 			continue;
1029 		gnttab_end_foreign_access_ref(txq->grant_ref[i]);
1030 		gnttab_release_grant_reference(&txq->gref_head,
1031 		    txq->grant_ref[i]);
1032 		txq->grant_ref[i] = GRANT_REF_INVALID;
1033 		add_id_to_freelist(txq->mbufs, i);
1034 		txq->mbufs_cnt--;
1035 		if (txq->mbufs_cnt < 0) {
1036 			panic("%s: mbufs_cnt must be >= 0", __func__);
1037 		}
1038 		m_free(m);
1039 	}
1040 }
1041 
1042 static struct mbuf *
1043 xn_alloc_one_rx_buffer(struct netfront_rxq *rxq)
1044 {
1045 	struct mbuf *m;
1046 
1047 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1048 	if (m == NULL)
1049 		return (NULL);
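	/*
	 * Provisionally claim the whole cluster; xn_get_responses()
	 * later trims m_len to the backend-reported rx->status.
	 */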
1050 	m->m_len = m->m_pkthdr.len = MJUMPAGESIZE;
1051 
1052 	return (m);
1053 }
1054 
1055 static void
1056 xn_alloc_rx_buffers(struct netfront_rxq *rxq)
1057 {
1058 	RING_IDX req_prod;
1059 	int notify;
1060 
1061 	XN_RX_LOCK_ASSERT(rxq);
1062 
1063 	if (__predict_false(rxq->info->carrier == 0))
1064 		return;
1065 
1066 	for (req_prod = rxq->ring.req_prod_pvt;
1067 	     req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE;
1068 	     req_prod++) {
1069 		struct mbuf *m;
1070 		unsigned short id;
1071 		grant_ref_t ref;
1072 		struct netif_rx_request *req;
1073 		unsigned long pfn;
1074 
1075 		m = xn_alloc_one_rx_buffer(rxq);
1076 		if (m == NULL)
1077 			break;
1078 
1079 		id = xn_rxidx(req_prod);
1080 
1081 		KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
1082 		rxq->mbufs[id] = m;
1083 
1084 		ref = gnttab_claim_grant_reference(&rxq->gref_head);
1085 		KASSERT(ref != GNTTAB_LIST_END,
1086 		    ("reserved grant references exhausted"));
1087 		rxq->grant_ref[id] = ref;
1088 
1089 		pfn = atop(vtophys(mtod(m, vm_offset_t)));
1090 		req = RING_GET_REQUEST(&rxq->ring, req_prod);
1091 
1092 		gnttab_grant_foreign_access_ref(ref,
1093 		    xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0);
1094 		req->id = id;
1095 		req->gref = ref;
1096 	}
1097 
1098 	rxq->ring.req_prod_pvt = req_prod;
1099 
1100 	/* Not enough requests? Try again later. */
1101 	if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) {
1102 		callout_reset_curcpu(&rxq->rx_refill, hz/10,
1103 		    xn_alloc_rx_buffers_callout, rxq);
1104 		return;
1105 	}
1106 
1107 	wmb();		/* barrier so the backend sees our requests */
1108 
1109 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
1110 	if (notify)
1111 		xen_intr_signal(rxq->xen_intr_handle);
1112 }
1113 
1114 static void xn_alloc_rx_buffers_callout(void *arg)
1115 {
1116 	struct netfront_rxq *rxq;
1117 
1118 	rxq = (struct netfront_rxq *)arg;
1119 	XN_RX_LOCK(rxq);
1120 	xn_alloc_rx_buffers(rxq);
1121 	XN_RX_UNLOCK(rxq);
1122 }
1123 
1124 static void
1125 xn_release_rx_bufs(struct netfront_rxq *rxq)
1126 {
1127 	int i, ref;
1128 	struct mbuf *m;
1129 
1130 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1131 		m = rxq->mbufs[i];
1132 
1133 		if (m == NULL)
1134 			continue;
1135 
1136 		ref = rxq->grant_ref[i];
1137 		if (ref == GRANT_REF_INVALID)
1138 			continue;
1139 
1140 		gnttab_end_foreign_access_ref(ref);
1141 		gnttab_release_grant_reference(&rxq->gref_head, ref);
1142 		rxq->mbufs[i] = NULL;
1143 		rxq->grant_ref[i] = GRANT_REF_INVALID;
1144 		m_freem(m);
1145 	}
1146 }
1147 
1148 static void
1149 xn_rxeof(struct netfront_rxq *rxq)
1150 {
1151 	struct ifnet *ifp;
1152 	struct netfront_info *np = rxq->info;
1153 #if (defined(INET) || defined(INET6))
1154 	struct lro_ctrl *lro = &rxq->lro;
1155 #endif
1156 	struct netfront_rx_info rinfo;
1157 	struct netif_rx_response *rx = &rinfo.rx;
1158 	struct netif_extra_info *extras = rinfo.extras;
1159 	RING_IDX i, rp;
1160 	struct mbuf *m;
1161 	struct mbufq mbufq_rxq, mbufq_errq;
1162 	int err, work_to_do;
1163 
1164 	do {
1165 		XN_RX_LOCK_ASSERT(rxq);
1166 		if (!netfront_carrier_ok(np))
1167 			return;
1168 
1169 		/* XXX: there should be some sane limit. */
1170 		mbufq_init(&mbufq_errq, INT_MAX);
1171 		mbufq_init(&mbufq_rxq, INT_MAX);
1172 
1173 		ifp = np->xn_ifp;
1174 
1175 		rp = rxq->ring.sring->rsp_prod;
1176 		rmb();	/* Ensure we see queued responses up to 'rp'. */
1177 
1178 		i = rxq->ring.rsp_cons;
1179 		while (i != rp) {
1180 			memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx));
1181 			memset(extras, 0, sizeof(rinfo.extras));
1182 
1183 			m = NULL;
1184 			err = xn_get_responses(rxq, &rinfo, rp, &i, &m);
1185 
1186 			if (__predict_false(err)) {
1187 				if (m)
1188 					(void)mbufq_enqueue(&mbufq_errq, m);
1189 				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1190 				continue;
1191 			}
1192 
1193 			m->m_pkthdr.rcvif = ifp;
1194 			if (rx->flags & NETRXF_data_validated) {
1195 				/*
1196 				 * According to mbuf(9) the correct way to tell
1197 				 * the stack that the checksum of an inbound
1198 				 * packet is correct, without it actually being
1199 				 * present (because the underlying interface
1200 				 * doesn't provide it), is to set the
1201 				 * CSUM_DATA_VALID and CSUM_PSEUDO_HDR flags,
1202 				 * and the csum_data field to 0xffff.
1203 				 */
1204 				m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
1205 				    | CSUM_PSEUDO_HDR);
1206 				m->m_pkthdr.csum_data = 0xffff;
1207 			}
1208 			if ((rx->flags & NETRXF_extra_info) != 0 &&
1209 			    (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type ==
1210 			    XEN_NETIF_EXTRA_TYPE_GSO)) {
1211 				m->m_pkthdr.tso_segsz =
1212 				    extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].u.gso.size;
1213 				m->m_pkthdr.csum_flags |= CSUM_TSO;
1214 			}
1215 
1216 			(void)mbufq_enqueue(&mbufq_rxq, m);
1217 			rxq->ring.rsp_cons = i;
1218 		}
1219 
1220 		mbufq_drain(&mbufq_errq);
1221 
1222 		/*
1223 		 * Process all the mbufs after the remapping is complete.
1224 		 * Break the mbuf chain first though.
1225 		 */
1226 		while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) {
1227 			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1228 
1229 			/* XXX: Do we really need to drop the rx lock? */
1230 			XN_RX_UNLOCK(rxq);
1231 #if (defined(INET) || defined(INET6))
1232 			/* Use LRO if possible */
1233 			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
1234 			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
1235 				/*
1236 				 * If LRO fails, pass up to the stack
1237 				 * directly.
1238 				 */
1239 				(*ifp->if_input)(ifp, m);
1240 			}
1241 #else
1242 			(*ifp->if_input)(ifp, m);
1243 #endif
1244 
1245 			XN_RX_LOCK(rxq);
1246 		}
1247 
1248 		rxq->ring.rsp_cons = i;
1249 
1250 #if (defined(INET) || defined(INET6))
1251 		/*
1252 		 * Flush any outstanding LRO work
1253 		 */
1254 		tcp_lro_flush_all(lro);
1255 #endif
1256 
1257 		xn_alloc_rx_buffers(rxq);
1258 
1259 		RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
1260 	} while (work_to_do);
1261 }
1262 
1263 static void
1264 xn_txeof(struct netfront_txq *txq)
1265 {
1266 	RING_IDX i, prod;
1267 	unsigned short id;
1268 	struct ifnet *ifp;
1269 	netif_tx_response_t *txr;
1270 	struct mbuf *m;
1271 	struct netfront_info *np = txq->info;
1272 
1273 	XN_TX_LOCK_ASSERT(txq);
1274 
1275 	if (!netfront_carrier_ok(np))
1276 		return;
1277 
1278 	ifp = np->xn_ifp;
1279 
1280 	do {
1281 		prod = txq->ring.sring->rsp_prod;
1282 		rmb(); /* Ensure we see responses up to 'prod'. */
1283 
1284 		for (i = txq->ring.rsp_cons; i != prod; i++) {
1285 			txr = RING_GET_RESPONSE(&txq->ring, i);
1286 			if (txr->status == NETIF_RSP_NULL)
1287 				continue;
1288 
1289 			if (txr->status != NETIF_RSP_OKAY) {
1290 				printf("%s: WARNING: response is %d!\n",
1291 				       __func__, txr->status);
1292 			}
1293 			id = txr->id;
1294 			m = txq->mbufs[id];
1295 			KASSERT(m != NULL, ("mbuf not found in chain"));
1296 			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
1297 				("mbuf already on the free list, but we're "
1298 				"trying to free it again!"));
1299 			M_ASSERTVALID(m);
1300 
1301 			if (__predict_false(gnttab_query_foreign_access(
1302 			    txq->grant_ref[id]) != 0)) {
1303 				panic("%s: grant id %u still in use by the "
1304 				    "backend", __func__, id);
1305 			}
1306 			gnttab_end_foreign_access_ref(txq->grant_ref[id]);
1307 			gnttab_release_grant_reference(
1308 				&txq->gref_head, txq->grant_ref[id]);
1309 			txq->grant_ref[id] = GRANT_REF_INVALID;
1310 
1311 			txq->mbufs[id] = NULL;
1312 			add_id_to_freelist(txq->mbufs, id);
1313 			txq->mbufs_cnt--;
1314 			m_free(m);
1315 			/* Only mark the txq active if we've freed up at least one slot to try */
1316 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1317 		}
1318 		txq->ring.rsp_cons = prod;
1319 
1320 		/*
1321 		 * Set a new event, then check for race with update of
1322 		 * tx_cons. Note that it is essential to schedule a
1323 		 * callback, no matter how few buffers are pending. Even if
1324 		 * there is space in the transmit ring, higher layers may
1325 		 * be blocked because too much data is outstanding: in such
1326 		 * cases notification from Xen is likely to be the only kick
1327 		 * that we'll get.
1328 		 */
1329 		txq->ring.sring->rsp_event =
1330 		    prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1;
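		/*
		 * Worked example: with 8 requests still outstanding
		 * (req_prod - prod == 8), rsp_event becomes prod + 5, so
		 * the backend raises the next event only once 5 more
		 * responses have been queued, batching completions rather
		 * than interrupting per packet.
		 */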
1331 
1332 		mb();
1333 	} while (prod != txq->ring.sring->rsp_prod);
1334 
1335 	if (txq->full &&
1336 	    ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
1337 		txq->full = false;
1338 		xn_txq_start(txq);
1339 	}
1340 }
1341 
1342 static void
1343 xn_intr(void *xsc)
1344 {
1345 	struct netfront_txq *txq = xsc;
1346 	struct netfront_info *np = txq->info;
1347 	struct netfront_rxq *rxq = &np->rxq[txq->id];
1348 
1349 	/* kick both tx and rx */
1350 	xn_rxq_intr(rxq);
1351 	xn_txq_intr(txq);
1352 }
1353 
1354 static void
1355 xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m,
1356     grant_ref_t ref)
1357 {
1358 	int new = xn_rxidx(rxq->ring.req_prod_pvt);
1359 
1360 	KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL"));
1361 	rxq->mbufs[new] = m;
1362 	rxq->grant_ref[new] = ref;
1363 	RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new;
1364 	RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref;
1365 	rxq->ring.req_prod_pvt++;
1366 }
1367 
1368 static int
1369 xn_get_extras(struct netfront_rxq *rxq,
1370     struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
1371 {
1372 	struct netif_extra_info *extra;
1373 
1374 	int err = 0;
1375 
1376 	do {
1377 		struct mbuf *m;
1378 		grant_ref_t ref;
1379 
1380 		if (__predict_false(*cons + 1 == rp)) {
1381 			err = EINVAL;
1382 			break;
1383 		}
1384 
1385 		extra = (struct netif_extra_info *)
1386 		RING_GET_RESPONSE(&rxq->ring, ++(*cons));
1387 
1388 		if (__predict_false(!extra->type ||
1389 			extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1390 			err = EINVAL;
1391 		} else {
1392 			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
1393 		}
1394 
1395 		m = xn_get_rx_mbuf(rxq, *cons);
1396 		ref = xn_get_rx_ref(rxq,  *cons);
1397 		xn_move_rx_slot(rxq, m, ref);
1398 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1399 
1400 	return (err);
1401 }
1402 
1403 static int
1404 xn_get_responses(struct netfront_rxq *rxq,
1405     struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
1406     struct mbuf  **list)
1407 {
1408 	struct netif_rx_response *rx = &rinfo->rx;
1409 	struct netif_extra_info *extras = rinfo->extras;
1410 	struct mbuf *m, *m0, *m_prev;
1411 	grant_ref_t ref = xn_get_rx_ref(rxq, *cons);
1412 	RING_IDX ref_cons = *cons;
1413 	int frags = 1;
1414 	int err = 0;
1415 	u_long ret;
1416 
1417 	m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons);
1418 
1419 	if (rx->flags & NETRXF_extra_info) {
1420 		err = xn_get_extras(rxq, extras, rp, cons);
1421 	}
1422 
1423 	if (m0 != NULL) {
1424 		m0->m_pkthdr.len = 0;
1425 		m0->m_next = NULL;
1426 	}
1427 
1428 	for (;;) {
1429 #if 0
1430 		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
1431 			rx->status, rx->offset, frags);
1432 #endif
1433 		if (__predict_false(rx->status < 0 ||
1434 		    rx->offset + rx->status > PAGE_SIZE)) {
1435 			xn_move_rx_slot(rxq, m, ref);
1437 			if (m0 == m)
1438 				m0 = NULL;
1439 			m = NULL;
1440 			err = EINVAL;
1441 			goto next_skip_queue;
1442 		}
1443 
1444 		/*
1445 		 * This definitely indicates a bug, either in this driver or in
1446 		 * the backend driver.  In the future this should flag the bad
1447 		 * situation to the system controller to reboot the backend.
1448 		 */
1449 		if (ref == GRANT_REF_INVALID) {
1450 			printf("%s: Bad rx response id %d.\n", __func__, rx->id);
1451 			err = EINVAL;
1452 			goto next;
1453 		}
1454 
1455 		ret = gnttab_end_foreign_access_ref(ref);
1456 		KASSERT(ret, ("Unable to end access to grant references"));
1457 
1458 		gnttab_release_grant_reference(&rxq->gref_head, ref);
1459 
1460 next:
1461 		if (m == NULL)
1462 			break;
1463 
1464 		m->m_len = rx->status;
1465 		m->m_data += rx->offset;
1466 		m0->m_pkthdr.len += rx->status;
1467 
1468 next_skip_queue:
1469 		if (!(rx->flags & NETRXF_more_data))
1470 			break;
1471 
1472 		if (*cons + frags == rp) {
1473 			if (net_ratelimit())
1474 				WPRINTK("Need more frags\n");
1475 			err = ENOENT;
1476 			printf("%s: cons %u frags %u rp %u, not enough frags\n",
1477 			       __func__, *cons, frags, rp);
1478 			break;
1479 		}
1480 		/*
1481 		 * Note that m can be NULL, if rx->status < 0 or if
1482 		 * rx->offset + rx->status > PAGE_SIZE above.
1483 		 */
1484 		m_prev = m;
1485 
1486 		rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags);
1487 		m = xn_get_rx_mbuf(rxq, *cons + frags);
1488 
1489 		/*
1490 		 * m_prev == NULL can happen if rx->status < 0 or if
1491 		 * rx->offset + rx->status > PAGE_SIZE above.
1492 		 */
1493 		if (m_prev != NULL)
1494 			m_prev->m_next = m;
1495 
1496 		/*
1497 		 * m0 can be NULL if rx->status < 0 or if rx->offset +
1498 		 * rx->status > PAGE_SIZE above.
1499 		 */
1500 		if (m0 == NULL)
1501 			m0 = m;
1502 		m->m_next = NULL;
1503 		ref = xn_get_rx_ref(rxq, *cons + frags);
1504 		ref_cons = *cons + frags;
1505 		frags++;
1506 	}
1507 	*list = m0;
1508 	*cons += frags;
1509 
1510 	return (err);
1511 }
1512 
1513 /**
1514  * \brief Count the number of fragments in an mbuf chain.
1515  *
1516  * Surprisingly, there isn't an M* macro for this.
1517  */
1518 static inline int
1519 xn_count_frags(struct mbuf *m)
1520 {
1521 	int nfrags;
1522 
1523 	for (nfrags = 0; m != NULL; m = m->m_next)
1524 		nfrags++;
1525 
1526 	return (nfrags);
1527 }
1528 
1529 /**
1530  * Given an mbuf chain, make sure we have enough room and then push
1531  * it onto the transmit ring.
1532  */
1533 static int
1534 xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
1535 {
1536 	struct mbuf *m;
1537 	struct netfront_info *np = txq->info;
1538 	struct ifnet *ifp = np->xn_ifp;
1539 	u_int nfrags;
1540 	int otherend_id;
1541 
1542 	/* Defragment the mbuf if necessary. */
1543 	nfrags = xn_count_frags(m_head);
1544 
1545 	/*
1546 	 * Check to see whether this request is longer than netback
1547 	 * can handle, and try to defrag it.  It is a bit lame, but the
1548 	 * netback driver in Linux can't deal with
1549 	 * nfrags > MAX_TX_REQ_FRAGS, which is a quirk of the Linux
1550 	 * network stack.
1551 	 */
1556 	if (nfrags > np->maxfrags) {
1557 		m = m_defrag(m_head, M_NOWAIT);
1558 		if (m == NULL) {
1559 			/*
1560 			 * Defrag failed, so free the mbuf and
1561 			 * therefore drop the packet.
1562 			 */
1563 			m_freem(m_head);
1564 			return (EMSGSIZE);
1565 		}
1566 		m_head = m;
1567 	}
1568 
1569 	/* Determine how many fragments now exist */
1570 	nfrags = xn_count_frags(m_head);
1571 
1572 	/*
1573 	 * Check to see whether the defragmented packet has too many
1574 	 * segments for the Linux netback driver.  The FreeBSD TCP
1575 	 * stack, with TSO enabled, can produce a chain of mbufs longer
1576 	 * than Linux can handle.  Make sure we don't pass a too-long
1577 	 * chain over to the other side by dropping the packet.  It
1578 	 * doesn't look like there is currently a way to tell the TCP
1579 	 * stack to generate a shorter chain of packets.
1580 	 */
1583 	if (nfrags > MAX_TX_REQ_FRAGS) {
1584 #ifdef DEBUG
1585 		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
1586 		       "won't be able to handle it, dropping\n",
1587 		       __func__, nfrags, MAX_TX_REQ_FRAGS);
1588 #endif
1589 		m_freem(m_head);
1590 		return (EMSGSIZE);
1591 	}
1592 
1593 	/*
1594 	 * This check should be redundant.  We've already verified that we
1595 	 * have enough slots in the ring to handle a packet of maximum
1596 	 * size, and that our packet is less than the maximum size.  Keep
1597 	 * it in here as an assert for now just to make certain that
1598 	 * chain_cnt is accurate.
1599 	 */
1600 	KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE,
1601 		("%s: mbufs_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
1602 		 "(%d)!", __func__, (int)txq->mbufs_cnt, (int)nfrags,
1603 		 (int)NET_TX_RING_SIZE));
1604 
1605 	/*
1606 	 * Start packing the mbufs in this chain into
1607 	 * the fragment pointers. Stop when we run out
1608 	 * of fragments or hit the end of the mbuf chain.
1609 	 */
1610 	otherend_id = xenbus_get_otherend_id(np->xbdev);
1611 	for (m = m_head; m != NULL; m = m->m_next) {
1613 		netif_tx_request_t *tx;
1614 		uintptr_t id;
1615 		grant_ref_t ref;
1616 		u_long mfn; /* XXX Wrong type? */
1617 
1618 		tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
1619 		id = get_id_from_freelist(txq->mbufs);
1620 		if (id == 0)
1621 			panic("%s: was allocated the freelist head!\n",
1622 			    __func__);
1623 		txq->mbufs_cnt++;
1624 		if (txq->mbufs_cnt > NET_TX_RING_SIZE)
1625 			panic("%s: mbufs_cnt must be <= NET_TX_RING_SIZE\n",
1626 			    __func__);
1627 		txq->mbufs[id] = m;
1628 		tx->id = id;
1629 		ref = gnttab_claim_grant_reference(&txq->gref_head);
1630 		KASSERT((short)ref >= 0, ("Negative ref"));
1631 		mfn = virt_to_mfn(mtod(m, vm_offset_t));
1632 		gnttab_grant_foreign_access_ref(ref, otherend_id,
1633 		    mfn, GNTMAP_readonly);
1634 		tx->gref = txq->grant_ref[id] = ref;
1635 		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
1636 		tx->flags = 0;
1637 		if (m == m_head) {
1638 			/*
1639 			 * The first fragment has the entire packet
1640 			 * size, subsequent fragments have just the
1641 			 * fragment size. The backend works out the
1642 			 * true size of the first fragment by
1643 			 * subtracting the sizes of the other
1644 			 * fragments.
1645 			 */
1646 			tx->size = m->m_pkthdr.len;
1647 
1648 			/*
1649 			 * The first fragment contains the checksum flags
1650 			 * and is optionally followed by extra data for
1651 			 * TSO etc.  CSUM_TSO requires checksum offloading;
1652 			 * some versions of FreeBSD fail to set CSUM_TCP in
1653 			 * the CSUM_TSO case, so we have to test for
1654 			 * CSUM_TSO explicitly.
1655 			 */
1660 			if (m->m_pkthdr.csum_flags
1661 			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
1662 				tx->flags |= (NETTXF_csum_blank
1663 				    | NETTXF_data_validated);
1664 			}
1665 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1666 				struct netif_extra_info *gso =
1667 					(struct netif_extra_info *)
1668 					RING_GET_REQUEST(&txq->ring,
1669 							 ++txq->ring.req_prod_pvt);
1670 
1671 				tx->flags |= NETTXF_extra_info;
1672 
1673 				gso->u.gso.size = m->m_pkthdr.tso_segsz;
1674 				gso->u.gso.type =
1675 					XEN_NETIF_GSO_TYPE_TCPV4;
1676 				gso->u.gso.pad = 0;
1677 				gso->u.gso.features = 0;
1678 
1679 				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
1680 				gso->flags = 0;
1681 			}
1682 		} else {
1683 			tx->size = m->m_len;
1684 		}
1685 		if (m->m_next)
1686 			tx->flags |= NETTXF_more_data;
1687 
1688 		txq->ring.req_prod_pvt++;
1689 	}
1690 	BPF_MTAP(ifp, m_head);
1691 
1692 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1693 	if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len);
1694 	if (m_head->m_flags & M_MCAST)
1695 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1696 
1697 	xn_txeof(txq);
1698 
1699 	return (0);
1700 }
1701 
1702 /* equivalent of network_open() in Linux */
1703 static void
1704 xn_ifinit_locked(struct netfront_info *np)
1705 {
1706 	struct ifnet *ifp;
1707 	int i;
1708 	struct netfront_rxq *rxq;
1709 
1710 	XN_LOCK_ASSERT(np);
1711 
1712 	ifp = np->xn_ifp;
1713 
1714 	if (ifp->if_drv_flags & IFF_DRV_RUNNING || !netfront_carrier_ok(np))
1715 		return;
1716 
1717 	xn_stop(np);
1718 
1719 	for (i = 0; i < np->num_queues; i++) {
1720 		rxq = &np->rxq[i];
1721 		XN_RX_LOCK(rxq);
1722 		xn_alloc_rx_buffers(rxq);
1723 		rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1;
1724 		if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring))
1725 			xn_rxeof(rxq);
1726 		XN_RX_UNLOCK(rxq);
1727 	}
1728 
1729 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1730 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1731 	if_link_state_change(ifp, LINK_STATE_UP);
1732 }
1733 
1734 static void
1735 xn_ifinit(void *xsc)
1736 {
1737 	struct netfront_info *sc = xsc;
1738 
1739 	XN_LOCK(sc);
1740 	xn_ifinit_locked(sc);
1741 	XN_UNLOCK(sc);
1742 }
1743 
1744 static int
1745 xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1746 {
1747 	struct netfront_info *sc = ifp->if_softc;
1748 	struct ifreq *ifr = (struct ifreq *) data;
1749 	device_t dev;
1750 #ifdef INET
1751 	struct ifaddr *ifa = (struct ifaddr *)data;
1752 #endif
1753 	int mask, error = 0, reinit;
1754 
1755 	dev = sc->xbdev;
1756 
1757 	switch (cmd) {
1758 	case SIOCSIFADDR:
1759 #ifdef INET
1760 		XN_LOCK(sc);
1761 		if (ifa->ifa_addr->sa_family == AF_INET) {
1762 			ifp->if_flags |= IFF_UP;
1763 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1764 				xn_ifinit_locked(sc);
1765 			arp_ifinit(ifp, ifa);
1766 			XN_UNLOCK(sc);
1767 		} else {
1768 			XN_UNLOCK(sc);
1769 #endif
1770 			error = ether_ioctl(ifp, cmd, data);
1771 #ifdef INET
1772 		}
1773 #endif
1774 		break;
1775 	case SIOCSIFMTU:
1776 		ifp->if_mtu = ifr->ifr_mtu;
1777 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1778 		xn_ifinit(sc);
1779 		break;
1780 	case SIOCSIFFLAGS:
1781 		XN_LOCK(sc);
1782 		if (ifp->if_flags & IFF_UP) {
1783 			/*
1784 			 * If only the state of the PROMISC flag changed,
1785 			 * then just use the 'set promisc mode' command
1786 			 * instead of reinitializing the entire NIC. Doing
1787 			 * a full re-init means reloading the firmware and
1788 			 * waiting for it to start up, which may take a
1789 			 * second or two.
1790 			 */
1791 			xn_ifinit_locked(sc);
1792 		} else {
1793 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1794 				xn_stop(sc);
1795 			}
1796 		}
1797 		sc->xn_if_flags = ifp->if_flags;
1798 		XN_UNLOCK(sc);
1799 		break;
1800 	case SIOCSIFCAP:
1801 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1802 		reinit = 0;
1803 
1804 		if (mask & IFCAP_TXCSUM) {
1805 			ifp->if_capenable ^= IFCAP_TXCSUM;
1806 			ifp->if_hwassist ^= XN_CSUM_FEATURES;
1807 		}
1808 		if (mask & IFCAP_TSO4) {
1809 			ifp->if_capenable ^= IFCAP_TSO4;
1810 			ifp->if_hwassist ^= CSUM_TSO;
1811 		}
1812 
1813 		if (mask & (IFCAP_RXCSUM | IFCAP_LRO)) {
1814 			/* These Rx features require us to renegotiate. */
1815 			reinit = 1;
1816 
1817 			if (mask & IFCAP_RXCSUM)
1818 				ifp->if_capenable ^= IFCAP_RXCSUM;
1819 			if (mask & IFCAP_LRO)
1820 				ifp->if_capenable ^= IFCAP_LRO;
1821 		}
1822 
1823 		if (reinit == 0)
1824 			break;
1825 
1826 		/*
1827 		 * We must reset the interface so the backend picks up the
1828 		 * new features.
1829 		 */
1830 		device_printf(sc->xbdev,
1831 		    "performing interface reset due to feature change\n");
1832 		XN_LOCK(sc);
1833 		netfront_carrier_off(sc);
1834 		sc->xn_reset = true;
1835 		/*
1836 		 * NB: the pending packet queue is not flushed, since
1837 		 * the interface should still support the old options.
1838 		 */
1839 		XN_UNLOCK(sc);
1840 		/*
1841 		 * Delete the xenstore nodes that export features.
1842 		 *
1843 		 * NB: There's a xenbus state called
1844 		 * "XenbusStateReconfiguring", which is what we should set
1845 		 * here. Sadly none of the backends know how to handle it,
1846 		 * and simply disconnect from the frontend, so we will just
1847 		 * switch back to XenbusStateInitialising in order to force
1848 		 * a reconnection.
1849 		 */
1850 		xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4");
1851 		xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload");
1852 		xenbus_set_state(dev, XenbusStateClosing);
1853 
1854 		/*
1855 		 * Wait for the frontend to reconnect before returning
1856 		 * from the ioctl. 30s should be more than enough for any
1857 		 * sane backend to reconnect.
1858 		 */
1859 		error = tsleep(sc, 0, "xn_rst", 30*hz);
1860 		break;
1861 	case SIOCADDMULTI:
1862 	case SIOCDELMULTI:
1863 		break;
1864 	case SIOCSIFMEDIA:
1865 	case SIOCGIFMEDIA:
1866 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1867 		break;
1868 	default:
1869 		error = ether_ioctl(ifp, cmd, data);
1870 	}
1871 
1872 	return (error);
1873 }
1874 
1875 static void
1876 xn_stop(struct netfront_info *sc)
1877 {
1878 	struct ifnet *ifp;
1879 
1880 	XN_LOCK_ASSERT(sc);
1881 
1882 	ifp = sc->xn_ifp;
1883 
1884 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1885 	if_link_state_change(ifp, LINK_STATE_DOWN);
1886 }
1887 
1888 static void
1889 xn_rebuild_rx_bufs(struct netfront_rxq *rxq)
1890 {
1891 	int requeue_idx, i;
1892 	grant_ref_t ref;
1893 	netif_rx_request_t *req;
1894 
1895 	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1896 		struct mbuf *m;
1897 		u_long pfn;
1898 
1899 		if (rxq->mbufs[i] == NULL)
1900 			continue;
1901 
1902 		m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i);
1903 		ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i);
1904 
1905 		req = RING_GET_REQUEST(&rxq->ring, requeue_idx);
1906 		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
1907 
1908 		gnttab_grant_foreign_access_ref(ref,
1909 		    xenbus_get_otherend_id(rxq->info->xbdev),
1910 		    pfn, 0);
1911 
1912 		req->gref = ref;
1913 		req->id   = requeue_idx;
1914 
1915 		requeue_idx++;
1916 	}
1917 
1918 	rxq->ring.req_prod_pvt = requeue_idx;
1919 }
1920 
1921 /* START of Xenolinux helper functions adapted to FreeBSD */
1922 static int
1923 xn_connect(struct netfront_info *np)
1924 {
1925 	int i, error;
1926 	u_int feature_rx_copy;
1927 	struct netfront_rxq *rxq;
1928 	struct netfront_txq *txq;
1929 
1930 	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1931 	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
1932 	if (error != 0)
1933 		feature_rx_copy = 0;
1934 
1935 	/* We only support rx copy. */
1936 	if (!feature_rx_copy)
1937 		return (EPROTONOSUPPORT);
1938 
1939 	/* Recovery procedure: */
1940 	error = talk_to_backend(np->xbdev, np);
1941 	if (error != 0)
1942 		return (error);
1943 
1944 	/* Step 1: Reinitialise variables. */
1945 	xn_query_features(np);
1946 	xn_configure_features(np);
1947 
1948 	/* Step 2: Release TX buffers. */
1949 	for (i = 0; i < np->num_queues; i++) {
1950 		txq = &np->txq[i];
1951 		xn_release_tx_bufs(txq);
1952 	}
1953 
1954 	/* Step 3: Rebuild the RX buffer freelist and the RX ring itself. */
1955 	for (i = 0; i < np->num_queues; i++) {
1956 		rxq = &np->rxq[i];
1957 		xn_rebuild_rx_bufs(rxq);
1958 	}
1959 
1960 	/* Step 4: All public and private state should now be sane.  Get
1961 	 * ready to start sending and receiving packets and give the driver
1962 	 * domain a kick because we've probably just requeued some
1963 	 * packets.
1964 	 */
1965 	netfront_carrier_on(np);
1966 	wakeup(np);
1967 
1968 	return (0);
1969 }
1970 
static void
xn_kick_rings(struct netfront_info *np)
{
	struct netfront_rxq *rxq;
	struct netfront_txq *txq;
	int i;

	for (i = 0; i < np->num_queues; i++) {
		txq = &np->txq[i];
		rxq = &np->rxq[i];
		xen_intr_signal(txq->xen_intr_handle);
		XN_TX_LOCK(txq);
		xn_txeof(txq);
		XN_TX_UNLOCK(txq);
		XN_RX_LOCK(rxq);
		xn_alloc_rx_buffers(rxq);
		XN_RX_UNLOCK(rxq);
	}
}

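/*
 * Read the feature flags advertised by the backend in xenstore and
 * translate them into interface capabilities: "feature-sg" allows
 * multi-fragment (scatter/gather) transmit requests,
 * "feature-gso-tcpv4" enables TSO4/LRO, and "feature-no-csum-offload"
 * revokes the checksum offload that is otherwise assumed present.
 */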
static void
xn_query_features(struct netfront_info *np)
{
	int val;

	device_printf(np->xbdev, "backend features:");

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
		"feature-sg", NULL, "%d", &val) != 0)
		val = 0;

	np->maxfrags = 1;
	if (val) {
		np->maxfrags = MAX_TX_REQ_FRAGS;
		printf(" feature-sg");
	}

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
		"feature-gso-tcpv4", NULL, "%d", &val) != 0)
		val = 0;

	np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
	if (val) {
		np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
		printf(" feature-gso-tcpv4");
	}

	/*
	 * HW CSUM offload is assumed to be available unless
	 * feature-no-csum-offload is set in xenstore.
	 */
	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
		"feature-no-csum-offload", NULL, "%d", &val) != 0)
		val = 0;

	np->xn_ifp->if_capabilities |= IFCAP_HWCSUM;
	if (val) {
		np->xn_ifp->if_capabilities &= ~(IFCAP_HWCSUM);
		printf(" feature-no-csum-offload");
	}

	printf("\n");
}

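/*
 * Reconcile the enabled capabilities with what the backend currently
 * offers, preserving as many of the previously enabled options as
 * possible.  The per-RX-queue LRO state is released and, if LRO stays
 * enabled, initialized again here.
 */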
static int
xn_configure_features(struct netfront_info *np)
{
	int err, cap_enabled;
#if (defined(INET) || defined(INET6))
	int i;
#endif
	struct ifnet *ifp;

	ifp = np->xn_ifp;
	err = 0;

	if ((ifp->if_capenable & ifp->if_capabilities) == ifp->if_capenable) {
		/* Current options are available, no need to do anything. */
		return (0);
	}

	/* Try to preserve as many options as possible. */
	cap_enabled = ifp->if_capenable;
	ifp->if_capenable = ifp->if_hwassist = 0;

#if (defined(INET) || defined(INET6))
	if ((cap_enabled & IFCAP_LRO) != 0)
		for (i = 0; i < np->num_queues; i++)
			tcp_lro_free(&np->rxq[i].lro);
	if (xn_enable_lro &&
	    (ifp->if_capabilities & cap_enabled & IFCAP_LRO) != 0) {
		ifp->if_capenable |= IFCAP_LRO;
		for (i = 0; i < np->num_queues; i++) {
			err = tcp_lro_init(&np->rxq[i].lro);
			if (err != 0) {
				device_printf(np->xbdev,
				    "LRO initialization failed\n");
				ifp->if_capenable &= ~IFCAP_LRO;
				break;
			}
			np->rxq[i].lro.ifp = ifp;
		}
	}
	if ((ifp->if_capabilities & cap_enabled & IFCAP_TSO4) != 0) {
		ifp->if_capenable |= IFCAP_TSO4;
		ifp->if_hwassist |= CSUM_TSO;
	}
#endif
	if ((ifp->if_capabilities & cap_enabled & IFCAP_TXCSUM) != 0) {
		ifp->if_capenable |= IFCAP_TXCSUM;
		ifp->if_hwassist |= XN_CSUM_FEATURES;
	}
	if ((ifp->if_capabilities & cap_enabled & IFCAP_RXCSUM) != 0)
		ifp->if_capenable |= IFCAP_RXCSUM;

	return (err);
}

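/*
 * Drain the buf_ring attached to a TX queue: enqueue the new mbuf, if
 * any, and push as many pending packets as the shared ring has slots
 * for, signalling the backend only when the ring protocol requires a
 * notification.  The caller must hold the TX queue lock.
 */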
static int
xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
{
	struct netfront_info *np;
	struct ifnet *ifp;
	struct buf_ring *br;
	int error, notify;

	np = txq->info;
	br = txq->br;
	ifp = np->xn_ifp;
	error = 0;

	XN_TX_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    !netfront_carrier_ok(np)) {
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error != 0)
			return (error);
	}

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (!xn_tx_slot_available(txq)) {
			drbr_putback(ifp, br, m);
			break;
		}

		error = xn_assemble_tx_request(txq, m);
		/* xn_assemble_tx_request always consumes the mbuf. */
		if (error != 0) {
			drbr_advance(ifp, br);
			break;
		}

		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify);
		if (notify)
			xen_intr_signal(txq->xen_intr_handle);

		drbr_advance(ifp, br);
	}

	if (RING_FULL(&txq->ring))
		txq->full = true;

	return (0);
}

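/*
 * Multiqueue transmit entry point (if_transmit).  Pick a TX queue from
 * the mbuf's flowid when a hash is present, or from the current CPU
 * otherwise.  If the queue lock is contended, the mbuf is left on the
 * buf_ring for the deferred-start task to push out later.
 */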
static int
xn_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct netfront_info *np;
	struct netfront_txq *txq;
	int i, npairs, error;

	np = ifp->if_softc;
	npairs = np->num_queues;

	if (!netfront_carrier_ok(np))
		return (ENOBUFS);

	KASSERT(npairs != 0, ("called with 0 available queues"));

	/* Check if the flowid is set. */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % npairs;
	else
		i = curcpu % npairs;

	txq = &np->txq[i];

	if (XN_TX_TRYLOCK(txq) != 0) {
		error = xn_txq_mq_start_locked(txq, m);
		XN_TX_UNLOCK(txq);
	} else {
		error = drbr_enqueue(ifp, txq->br, m);
		taskqueue_enqueue(txq->tq, &txq->defrtask);
	}

	return (error);
}

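/*
 * Free any packets still queued on the per-queue buf_rings, then flush
 * the generic interface queue.
 */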
static void
xn_qflush(struct ifnet *ifp)
{
	struct netfront_info *np;
	struct netfront_txq *txq;
	struct mbuf *m;
	int i;

	np = ifp->if_softc;

	for (i = 0; i < np->num_queues; i++) {
		txq = &np->txq[i];

		XN_TX_LOCK(txq);
		while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
			m_freem(m);
		XN_TX_UNLOCK(txq);
	}

	if_qflush(ifp);
}

/**
 * Create a network device.
 * @param dev  Newbus device representing this virtual NIC.
 */
int
create_netdev(device_t dev)
{
	struct netfront_info *np;
	int err;
	struct ifnet *ifp;

	np = device_get_softc(dev);

	np->xbdev         = dev;

	mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);

	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

	err = xen_net_read_mac(dev, np->mac);
	if (err != 0)
		goto error;

	/* Set up ifnet structure */
	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = np;
	if_initname(ifp, "xn", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xn_ioctl;

	ifp->if_transmit = xn_txq_mq_start;
	ifp->if_qflush = xn_qflush;

	ifp->if_init = xn_ifinit;

	ifp->if_hwassist = XN_CSUM_FEATURES;
	/* Enable all supported features at device creation. */
	ifp->if_capenable = ifp->if_capabilities =
	    IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO;
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;

	ether_ifattach(ifp, np->mac);
	netfront_carrier_off(np);

	return (0);

error:
	KASSERT(err != 0, ("Error path with no error code specified"));
	return (err);
}

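/*
 * Newbus detach method: tear the interface down and release everything
 * acquired in create_netdev() and the connection setup.
 */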
static int
netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return (0);
}

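/*
 * Undo create_netdev(): stop the interface, disconnect from the
 * backend, detach from the network stack and free the ifnet and the
 * queue arrays.
 */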
static void
netif_free(struct netfront_info *np)
{

	XN_LOCK(np);
	xn_stop(np);
	XN_UNLOCK(np);
	netif_disconnect_backend(np);
	ether_ifdetach(np->xn_ifp);
	free(np->rxq, M_DEVBUF);
	free(np->txq, M_DEVBUF);
	if_free(np->xn_ifp);
	np->xn_ifp = NULL;
	ifmedia_removeall(&np->sc_media);
}

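/*
 * Turn the carrier off with every queue lock held, so that no TX or RX
 * path can touch the rings while they are dismantled, and then
 * disconnect each queue pair from the backend.
 */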
static void
netif_disconnect_backend(struct netfront_info *np)
{
	u_int i;

	for (i = 0; i < np->num_queues; i++) {
		XN_RX_LOCK(&np->rxq[i]);
		XN_TX_LOCK(&np->txq[i]);
	}
	netfront_carrier_off(np);
	for (i = 0; i < np->num_queues; i++) {
		XN_RX_UNLOCK(&np->rxq[i]);
		XN_TX_UNLOCK(&np->txq[i]);
	}

	for (i = 0; i < np->num_queues; i++) {
		disconnect_rxq(&np->rxq[i]);
		disconnect_txq(&np->txq[i]);
	}
}

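/*
 * Media handling is a formality for a paravirtualized NIC: there is no
 * PHY, so the single "manual" medium is always reported as active.
 */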
static int
xn_ifmedia_upd(struct ifnet *ifp)
{

	return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,         netfront_probe),
	DEVMETHOD(device_attach,        netfront_attach),
	DEVMETHOD(device_detach,        netfront_detach),
	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
	DEVMETHOD(device_suspend,       netfront_suspend),
	DEVMETHOD(device_resume,        netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

	DEVMETHOD_END
};

static driver_t netfront_driver = {
	"xn",
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xn, xenbusb_front, netfront_driver, netfront_devclass, NULL,
    NULL);