xref: /freebsd/sys/dev/xen/netfront/netfront.c (revision a4dc509f723944821bcfcc52005ff87c9a5dee5b)
1 /*-
2  * Copyright (c) 2004-2006 Kip Macy
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_inet.h"
31 #include "opt_inet6.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/limits.h>
37 #include <sys/mbuf.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/kernel.h>
41 #include <sys/socket.h>
42 #include <sys/sysctl.h>
43 #include <sys/queue.h>
44 #include <sys/lock.h>
45 #include <sys/sx.h>
46 
47 #include <net/if.h>
48 #include <net/if_var.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 
54 #include <net/bpf.h>
55 
56 #include <net/if_types.h>
57 
58 #include <netinet/in_systm.h>
59 #include <netinet/in.h>
60 #include <netinet/ip.h>
61 #include <netinet/if_ether.h>
62 #include <netinet/tcp.h>
63 #include <netinet/tcp_lro.h>
64 
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67 
68 #include <machine/clock.h>      /* for DELAY */
69 #include <machine/bus.h>
70 #include <machine/resource.h>
71 #include <machine/frame.h>
72 #include <machine/vmparam.h>
73 
74 #include <sys/bus.h>
75 #include <sys/rman.h>
76 
77 #include <machine/intr_machdep.h>
78 
79 #include <xen/xen-os.h>
80 #include <xen/hypervisor.h>
81 #include <xen/xen_intr.h>
82 #include <xen/gnttab.h>
83 #include <xen/interface/memory.h>
84 #include <xen/interface/io/netif.h>
85 #include <xen/xenbus/xenbusvar.h>
86 
87 #include "xenbus_if.h"
88 
89 /* Features supported by all backends.  TSO and LRO can be negotiated */
90 #define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
91 
92 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
93 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
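/*
 * __RING_SIZE() rounds the number of slots that fit in one shared page
 * down to a power of two; with 4 KiB pages both rings work out to 256
 * entries.  xennet_rxidx() below relies on the size being a power of
 * two.
 */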
94 
95 /*
96  * Should the driver do LRO on the RX end?
97  *  This can be toggled on the fly, but the
98  *  interface must be reset (down/up) for
99  *  the change to take effect.
100  */
101 static int xn_enable_lro = 1;
102 TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
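/*
 * The tunable can be set from loader.conf, e.g. hw.xn.enable_lro="0";
 * netfront_attach() below also exports it per device as the
 * dev.xn.<unit>.enable_lro sysctl.
 */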
103 
104 /**
105  * \brief The maximum allowed data fragments in a single transmit
106  *        request.
107  *
108  * This limit is imposed by the backend driver.  We assume here that
109  * we are dealing with a Linux driver domain and have set our limit
110  * to mirror the Linux MAX_SKB_FRAGS constant.
111  */
112 #define	MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
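/* With 4 KiB pages this works out to 65536 / 4096 + 2 == 18 fragments. */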
113 
114 #define RX_COPY_THRESHOLD 256
115 
116 #define net_ratelimit() 0
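/*
 * Stubbed to 0: the rate-limited warnings guarded by net_ratelimit()
 * below are compiled away entirely.
 */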
117 
118 struct netfront_info;
119 struct netfront_rx_info;
120 
121 static void xn_txeof(struct netfront_info *);
122 static void xn_rxeof(struct netfront_info *);
123 static void network_alloc_rx_buffers(struct netfront_info *);
124 
125 static void xn_tick_locked(struct netfront_info *);
126 static void xn_tick(void *);
127 
128 static void xn_intr(void *);
129 static inline int xn_count_frags(struct mbuf *m);
130 static int  xn_assemble_tx_request(struct netfront_info *sc,
131 				   struct mbuf *m_head);
132 static void xn_start_locked(struct ifnet *);
133 static void xn_start(struct ifnet *);
134 static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
135 static void xn_ifinit_locked(struct netfront_info *);
136 static void xn_ifinit(void *);
137 static void xn_stop(struct netfront_info *);
138 static void xn_query_features(struct netfront_info *np);
139 static int  xn_configure_features(struct netfront_info *np);
140 #ifdef notyet
141 static void xn_watchdog(struct ifnet *);
142 #endif
143 
144 #ifdef notyet
145 static void netfront_closing(device_t dev);
146 #endif
147 static void netif_free(struct netfront_info *info);
148 static int netfront_detach(device_t dev);
149 
150 static int talk_to_backend(device_t dev, struct netfront_info *info);
151 static int create_netdev(device_t dev);
152 static void netif_disconnect_backend(struct netfront_info *info);
153 static int setup_device(device_t dev, struct netfront_info *info);
154 static void free_ring(int *ref, void *ring_ptr_ref);
155 
156 static int  xn_ifmedia_upd(struct ifnet *ifp);
157 static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
158 
159 /* Xenolinux helper functions */
160 int network_connect(struct netfront_info *);
161 
162 static void xn_free_rx_ring(struct netfront_info *);
163 
164 static void xn_free_tx_ring(struct netfront_info *);
165 
166 static int xennet_get_responses(struct netfront_info *np,
167 	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
168 	struct mbuf **list);
169 
170 #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)
171 
172 #define INVALID_P2M_ENTRY (~0UL)
173 
174 /*
175  * Mbuf pointers. We need these to keep track of the virtual addresses
176  * of our mbuf chains since we can only convert from virtual to physical,
177  * not the other way around.  The size must track the free index arrays.
178  */
179 struct xn_chain_data {
180 	struct mbuf    *xn_tx_chain[NET_TX_RING_SIZE+1];
181 	int		xn_tx_chain_cnt;
182 	struct mbuf    *xn_rx_chain[NET_RX_RING_SIZE+1];
183 };
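/*
 * xn_tx_chain doubles as a free list: a slot holds either a real mbuf
 * pointer or, while free, the small integer index of the next free slot
 * cast to a pointer.  Slot 0 is reserved as the free-list head, which
 * is why the arrays have one extra entry; see add_id_to_freelist() and
 * get_id_from_freelist() below.
 */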
184 
185 struct netfront_stats
186 {
187 	u_long	rx_packets;		/* total packets received	*/
188 	u_long	tx_packets;		/* total packets transmitted	*/
189 	u_long	rx_bytes;		/* total bytes received 	*/
190 	u_long	tx_bytes;		/* total bytes transmitted	*/
191 	u_long	rx_errors;		/* bad packets received		*/
192 	u_long	tx_errors;		/* packet transmit problems	*/
193 };
194 
195 struct netfront_info {
196 	struct ifnet *xn_ifp;
197 	struct lro_ctrl xn_lro;
198 
199 	struct netfront_stats stats;
200 	u_int tx_full;
201 
202 	netif_tx_front_ring_t tx;
203 	netif_rx_front_ring_t rx;
204 
205 	struct mtx   tx_lock;
206 	struct mtx   rx_lock;
207 	struct mtx   sc_lock;
208 
209 	xen_intr_handle_t xen_intr_handle;
210 	u_int carrier;
211 	u_int maxfrags;
212 
213 	/* Receive-ring batched refills. */
214 #define RX_MIN_TARGET 32
215 #define RX_MAX_TARGET NET_RX_RING_SIZE
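	/*
	 * rx_target floats between rx_min_target and rx_max_target: it is
	 * doubled in network_alloc_rx_buffers() whenever the backend looks
	 * to be running low on receive buffers.
	 */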
216 	int rx_min_target;
217 	int rx_max_target;
218 	int rx_target;
219 
220 	grant_ref_t gref_tx_head;
221 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
222 	grant_ref_t gref_rx_head;
223 	grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
224 
225 	device_t		xbdev;
226 	int			tx_ring_ref;
227 	int			rx_ring_ref;
228 	uint8_t			mac[ETHER_ADDR_LEN];
229 	struct xn_chain_data	xn_cdata;	/* mbufs */
230 	struct mbufq		xn_rx_batch;	/* batch queue */
231 
232 	int			xn_if_flags;
233 	struct callout	        xn_stat_ch;
234 
235 	xen_pfn_t		rx_pfn_array[NET_RX_RING_SIZE];
236 	struct ifmedia		sc_media;
237 
238 	bool			xn_resume;
239 };
240 
241 #define rx_mbufs xn_cdata.xn_rx_chain
242 #define tx_mbufs xn_cdata.xn_tx_chain
243 
244 #define XN_RX_LOCK(_sc)           mtx_lock(&(_sc)->rx_lock)
245 #define XN_RX_UNLOCK(_sc)         mtx_unlock(&(_sc)->rx_lock)
246 
247 #define XN_TX_LOCK(_sc)           mtx_lock(&(_sc)->tx_lock)
248 #define XN_TX_UNLOCK(_sc)         mtx_unlock(&(_sc)->tx_lock)
249 
250 #define XN_LOCK(_sc)           mtx_lock(&(_sc)->sc_lock);
251 #define XN_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_lock);
252 
253 #define XN_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->sc_lock, MA_OWNED);
254 #define XN_RX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->rx_lock, MA_OWNED);
255 #define XN_TX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->tx_lock, MA_OWNED);
256 
257 struct netfront_rx_info {
258 	struct netif_rx_response rx;
259 	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
260 };
261 
262 #define netfront_carrier_on(netif)	((netif)->carrier = 1)
263 #define netfront_carrier_off(netif)	((netif)->carrier = 0)
264 #define netfront_carrier_ok(netif)	((netif)->carrier)
265 
266 /* Access macros for acquiring and freeing slots in xn_cdata.xn_{tx,rx}_chain[]. */
267 
268 static inline void
269 add_id_to_freelist(struct mbuf **list, uintptr_t id)
270 {
271 	KASSERT(id != 0,
272 		("%s: the head item (0) must always be free.", __func__));
273 	list[id] = list[0];
274 	list[0]  = (struct mbuf *)id;
275 }
276 
277 static inline unsigned short
278 get_id_from_freelist(struct mbuf **list)
279 {
280 	uintptr_t id;
281 
282 	id = (uintptr_t)list[0];
283 	KASSERT(id != 0,
284 		("%s: the head item (0) must always remain free.", __func__));
285 	list[0] = list[id];
286 	return (id);
287 }
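/*
 * Worked example (illustrative only, not driver code): if slots 1 and 2
 * are free with slot 1 at the head, then list[0] == (struct mbuf *)1
 * and list[1] == (struct mbuf *)2.  get_id_from_freelist() returns 1
 * and leaves list[0] == (struct mbuf *)2; a later
 * add_id_to_freelist(list, 1) restores the original state.
 */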
288 
289 static inline int
290 xennet_rxidx(RING_IDX idx)
291 {
292 	return (idx & (NET_RX_RING_SIZE - 1));
293 }
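/*
 * Example: with a 256-entry ring, ring index 260 maps to slot
 * 260 & 255 == 4.  This is only correct because the ring size is a
 * power of two.
 */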
294 
295 static inline struct mbuf *
296 xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
297 {
298 	int i = xennet_rxidx(ri);
299 	struct mbuf *m;
300 
301 	m = np->rx_mbufs[i];
302 	np->rx_mbufs[i] = NULL;
303 	return (m);
304 }
305 
306 static inline grant_ref_t
307 xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
308 {
309 	int i = xennet_rxidx(ri);
310 	grant_ref_t ref = np->grant_rx_ref[i];
311 	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
312 	np->grant_rx_ref[i] = GRANT_REF_INVALID;
313 	return (ref);
314 }
315 
316 #define IPRINTK(fmt, args...) \
317     printf("[XEN] " fmt, ##args)
318 #ifdef INVARIANTS
319 #define WPRINTK(fmt, args...) \
320     printf("[XEN] " fmt, ##args)
321 #else
322 #define WPRINTK(fmt, args...)
323 #endif
324 #ifdef DEBUG
325 #define DPRINTK(fmt, args...) \
326     printf("[XEN] %s: " fmt, __func__, ##args)
327 #else
328 #define DPRINTK(fmt, args...)
329 #endif
330 
331 /**
332  * Read the 'mac' node at the given device's node in the store, and parse that
333  * as colon-separated octets, placing the result in the given mac array.  mac
334  * must be a preallocated array of length ETHER_ADDR_LEN.
335  * Return 0 on success, or errno on error.
336  */
337 static int
338 xen_net_read_mac(device_t dev, uint8_t mac[])
339 {
340 	int error, i;
341 	char *s, *e, *macstr;
342 	const char *path;
343 
344 	path = xenbus_get_node(dev);
345 	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
346 	if (error == ENOENT) {
347 		/*
348 		 * Deal with missing mac XenStore nodes on devices with
349 		 * HVM emulation (the 'ioemu' configuration attribute)
350 		 * enabled.
351 		 *
352 		 * The HVM emulator may execute in a stub device model
353 		 * domain which lacks the permission, only given to Dom0,
354 		 * to update the guest's XenStore tree.  For this reason,
355 		 * the HVM emulator doesn't even attempt to write the
356 		 * front-side mac node, even when operating in Dom0.
357 		 * However, there should always be a mac listed in the
358 		 * backend tree.  Fall back to this version if our query
359 		 * of the front side XenStore location doesn't find
360 		 * anything.
361 		 */
362 		path = xenbus_get_otherend_path(dev);
363 		error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
364 	}
365 	if (error != 0) {
366 		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
367 		return (error);
368 	}
369 
370 	s = macstr;
371 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
372 		mac[i] = strtoul(s, &e, 16);
373 		if (s == e || (e[0] != ':' && e[0] != 0)) {
374 			free(macstr, M_XENBUS);
375 			return (ENOENT);
376 		}
377 		s = &e[1];
378 	}
379 	free(macstr, M_XENBUS);
380 	return (0);
381 }
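/*
 * For illustration, a well-formed 'mac' node holds a string such as
 * "00:16:3e:00:00:01" (a made-up address in the Xen OUI); each pass of
 * the strtoul() loop above consumes one octet and leaves 'e' on the ':'
 * separator or on the terminating NUL.
 */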
382 
383 /**
384  * Entry point to this code when a new device is created.  Match devices
385  * of type 'vif'; the basic structures and the ring buffers used to
386  * communicate with the backend are allocated later, at attach and
387  * connect time (see create_netdev() and talk_to_backend()).
388  */
389 static int
390 netfront_probe(device_t dev)
391 {
392 
393 	if (xen_hvm_domain() && xen_disable_pv_nics != 0)
394 		return (ENXIO);
395 
396 	if (!strcmp(xenbus_get_type(dev), "vif")) {
397 		device_set_desc(dev, "Virtual Network Interface");
398 		return (0);
399 	}
400 
401 	return (ENXIO);
402 }
403 
404 static int
405 netfront_attach(device_t dev)
406 {
407 	int err;
408 
409 	err = create_netdev(dev);
410 	if (err) {
411 		xenbus_dev_fatal(dev, err, "creating netdev");
412 		return (err);
413 	}
414 
415 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
416 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
417 	    OID_AUTO, "enable_lro", CTLFLAG_RW,
418 	    &xn_enable_lro, 0, "Large Receive Offload");
419 
420 	return (0);
421 }
422 
423 static int
424 netfront_suspend(device_t dev)
425 {
426 	struct netfront_info *info = device_get_softc(dev);
427 
428 	XN_RX_LOCK(info);
429 	XN_TX_LOCK(info);
430 	netfront_carrier_off(info);
431 	XN_TX_UNLOCK(info);
432 	XN_RX_UNLOCK(info);
433 	return (0);
434 }
435 
436 /**
437  * We are reconnecting to the backend, due to a suspend/resume, or a backend
438  * driver restart.  We tear down our netif structure and recreate it, but
439  * leave the device-layer structures intact so that this is transparent to the
440  * rest of the kernel.
441  */
442 static int
443 netfront_resume(device_t dev)
444 {
445 	struct netfront_info *info = device_get_softc(dev);
446 
447 	info->xn_resume = true;
448 	netif_disconnect_backend(info);
449 	return (0);
450 }
451 
452 /* Common code used when first setting up, and when resuming. */
453 static int
454 talk_to_backend(device_t dev, struct netfront_info *info)
455 {
456 	const char *message;
457 	struct xs_transaction xst;
458 	const char *node = xenbus_get_node(dev);
459 	int err;
460 
461 	err = xen_net_read_mac(dev, info->mac);
462 	if (err) {
463 		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
464 		goto out;
465 	}
466 
467 	/* Create shared ring, alloc event channel. */
468 	err = setup_device(dev, info);
469 	if (err)
470 		goto out;
471 
472  again:
473 	err = xs_transaction_start(&xst);
474 	if (err) {
475 		xenbus_dev_fatal(dev, err, "starting transaction");
476 		goto destroy_ring;
477 	}
478 	err = xs_printf(xst, node, "tx-ring-ref", "%u",
479 			info->tx_ring_ref);
480 	if (err) {
481 		message = "writing tx ring-ref";
482 		goto abort_transaction;
483 	}
484 	err = xs_printf(xst, node, "rx-ring-ref", "%u",
485 			info->rx_ring_ref);
486 	if (err) {
487 		message = "writing rx ring-ref";
488 		goto abort_transaction;
489 	}
490 	err = xs_printf(xst, node,
491 			"event-channel", "%u",
492 			xen_intr_port(info->xen_intr_handle));
493 	if (err) {
494 		message = "writing event-channel";
495 		goto abort_transaction;
496 	}
497 	err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
498 	if (err) {
499 		message = "writing request-rx-copy";
500 		goto abort_transaction;
501 	}
502 	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
503 	if (err) {
504 		message = "writing feature-rx-notify";
505 		goto abort_transaction;
506 	}
507 	err = xs_printf(xst, node, "feature-sg", "%d", 1);
508 	if (err) {
509 		message = "writing feature-sg";
510 		goto abort_transaction;
511 	}
512 	err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
513 	if (err) {
514 		message = "writing feature-gso-tcpv4";
515 		goto abort_transaction;
516 	}
517 
518 	err = xs_transaction_end(xst, 0);
519 	if (err) {
520 		if (err == EAGAIN)
521 			goto again;
522 		xenbus_dev_fatal(dev, err, "completing transaction");
523 		goto destroy_ring;
524 	}
525 
526 	return (0);
527 
528  abort_transaction:
529 	xs_transaction_end(xst, 1);
530 	xenbus_dev_fatal(dev, err, "%s", message);
531  destroy_ring:
532 	netif_free(info);
533  out:
534 	return (err);
535 }
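/*
 * On success the frontend's XenStore directory ends up looking roughly
 * like the following (illustrative values, under a typical node such as
 * /local/domain/<domid>/device/vif/0):
 *
 *	tx-ring-ref = "8"		rx-ring-ref = "9"
 *	event-channel = "17"		request-rx-copy = "1"
 *	feature-rx-notify = "1"		feature-sg = "1"
 *	feature-gso-tcpv4 = "1"
 */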
536 
537 static int
538 setup_device(device_t dev, struct netfront_info *info)
539 {
540 	netif_tx_sring_t *txs;
541 	netif_rx_sring_t *rxs;
542 	int error;
543 
544 	info->tx_ring_ref = GRANT_REF_INVALID;
545 	info->rx_ring_ref = GRANT_REF_INVALID;
546 	info->rx.sring = NULL;
547 	info->tx.sring = NULL;
548 
549 	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
550 	if (!txs) {
551 		error = ENOMEM;
552 		xenbus_dev_fatal(dev, error, "allocating tx ring page");
553 		goto fail;
554 	}
555 	SHARED_RING_INIT(txs);
556 	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
557 	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
558 	if (error)
559 		goto fail;
560 
561 	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
562 	if (!rxs) {
563 		error = ENOMEM;
564 		xenbus_dev_fatal(dev, error, "allocating rx ring page");
565 		goto fail;
566 	}
567 	SHARED_RING_INIT(rxs);
568 	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
569 
570 	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
571 	if (error)
572 		goto fail;
573 
574 	error = xen_intr_alloc_and_bind_local_port(dev,
575 	    xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info,
576 	    INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle);
577 
578 	if (error) {
579 		xenbus_dev_fatal(dev, error,
580 				 "xen_intr_alloc_and_bind_local_port failed");
581 		goto fail;
582 	}
583 
584 	return (0);
585 
586  fail:
587 	netif_free(info);
588 	return (error);
589 }
590 
591 #ifdef INET
592 /**
593  * If this interface has an ipv4 address, send an arp for it. This
594  * helps to get the network going again after migrating hosts.
595  */
596 static void
597 netfront_send_fake_arp(device_t dev, struct netfront_info *info)
598 {
599 	struct ifnet *ifp;
600 	struct ifaddr *ifa;
601 
602 	ifp = info->xn_ifp;
603 	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
604 		if (ifa->ifa_addr->sa_family == AF_INET) {
605 			arp_ifinit(ifp, ifa);
606 		}
607 	}
608 }
609 #endif
610 
611 /**
612  * Callback received when the backend's state changes.
613  */
614 static void
615 netfront_backend_changed(device_t dev, XenbusState newstate)
616 {
617 	struct netfront_info *sc = device_get_softc(dev);
618 
619 	DPRINTK("newstate=%d\n", newstate);
620 
621 	switch (newstate) {
622 	case XenbusStateInitialising:
623 	case XenbusStateInitialised:
624 	case XenbusStateUnknown:
625 	case XenbusStateClosed:
626 	case XenbusStateReconfigured:
627 	case XenbusStateReconfiguring:
628 		break;
629 	case XenbusStateInitWait:
630 		if (xenbus_get_state(dev) != XenbusStateInitialising)
631 			break;
632 		if (network_connect(sc) != 0)
633 			break;
634 		xenbus_set_state(dev, XenbusStateConnected);
635 		break;
636 	case XenbusStateClosing:
637 		xenbus_set_state(dev, XenbusStateClosed);
638 		break;
639 	case XenbusStateConnected:
640 #ifdef INET
641 		netfront_send_fake_arp(dev, sc);
642 #endif
643 		break;
644 	}
645 }
646 
647 static void
648 xn_free_rx_ring(struct netfront_info *sc)
649 {
650 #if 0
651 	int i;
652 
653 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
654 		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
655 			m_freem(sc->rx_mbufs[i]);
656 			sc->rx_mbufs[i] = NULL;
657 		}
658 	}
659 
660 	sc->rx.rsp_cons = 0;
661 	sc->xn_rx_if->req_prod = 0;
662 	sc->xn_rx_if->event = sc->rx.rsp_cons ;
663 #endif
664 }
665 
666 static void
667 xn_free_tx_ring(struct netfront_info *sc)
668 {
669 #if 0
670 	int i;
671 
672 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
673 		if (sc->tx_mbufs[i] != NULL) {
674 			m_freem(sc->tx_mbufs[i]);
675 			sc->xn_cdata.xn_tx_chain[i] = NULL;
676 		}
677 	}
678 
679 	return;
680 #endif
681 }
682 
683 /**
684  * \brief Verify that there is sufficient space in the Tx ring
685  *        buffer for a maximally sized request to be enqueued.
686  *
687  * A transmit request requires a transmit descriptor for each packet
688  * fragment, plus up to 2 entries for "options" (e.g. TSO).
689  */
690 static inline int
691 xn_tx_slot_available(struct netfront_info *np)
692 {
693 	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
694 }
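/*
 * With 4 KiB pages the check above demands more than 18 + 2 == 20 free
 * request slots before another packet is accepted.
 */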
695 
696 static void
697 netif_release_tx_bufs(struct netfront_info *np)
698 {
699 	int i;
700 
701 	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
702 		struct mbuf *m;
703 
704 		m = np->tx_mbufs[i];
705 
706 		/*
707 		 * We assume that no kernel addresses are
708 		 * less than NET_TX_RING_SIZE.  Any entry
709 		 * in the table that is below this number
710 		 * must be an index from free-list tracking.
711 		 */
712 		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
713 			continue;
714 		gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
715 		gnttab_release_grant_reference(&np->gref_tx_head,
716 		    np->grant_tx_ref[i]);
717 		np->grant_tx_ref[i] = GRANT_REF_INVALID;
718 		add_id_to_freelist(np->tx_mbufs, i);
719 		np->xn_cdata.xn_tx_chain_cnt--;
720 		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
721 			panic("%s: tx_chain_cnt must be >= 0", __func__);
722 		}
723 		m_free(m);
724 	}
725 }
726 
727 static void
728 network_alloc_rx_buffers(struct netfront_info *sc)
729 {
730 	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
731 	unsigned short id;
732 	struct mbuf *m_new;
733 	int i, batch_target, notify;
734 	RING_IDX req_prod;
735 	grant_ref_t ref;
736 	netif_rx_request_t *req;
737 	vm_offset_t vaddr;
738 	u_long pfn;
739 
740 	req_prod = sc->rx.req_prod_pvt;
741 
742 	if (__predict_false(sc->carrier == 0))
743 		return;
744 
745 	/*
746 	 * Allocate mbufs greedily, even though we batch updates to the
747 	 * receive ring. This creates a less bursty demand on the memory
748 	 * allocator, and so should reduce the chance of failed allocation
749 	 * requests both for ourselves and for other kernel subsystems.
750 	 *
751 	 * Here we attempt to maintain rx_target buffers in flight, counting
752 	 * buffers that we have yet to process in the receive ring.
753 	 */
754 	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
755 	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
756 		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
757 		if (m_new == NULL) {
758 			if (i != 0)
759 				goto refill;
760 			/*
761 			 * XXX set timer
762 			 */
763 			break;
764 		}
765 		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;
766 
767 		/* queue the mbufs allocated */
768 		(void)mbufq_enqueue(&sc->xn_rx_batch, m_new);
769 	}
770 
771 	/*
772 	 * If we've allocated at least half of our target number of entries,
773 	 * submit them to the backend - we have enough to make the overhead
774 	 * of submission worthwhile.  Otherwise wait for more mbufs and
775 	 * request entries to become available.
776 	 */
777 	if (i < (sc->rx_target / 2)) {
778 		if (req_prod > sc->rx.sring->req_prod)
779 			goto push;
780 		return;
781 	}
782 
783 	/*
784 	 * Double the floating fill target if we risked having the backend
785 	 * run out of empty buffers for receive traffic.  We define "running
786 	 * low" as having less than a fourth of our target buffers free
787 	 * at the time we refilled the queue.
788 	 */
789 	if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
790 		sc->rx_target *= 2;
791 		if (sc->rx_target > sc->rx_max_target)
792 			sc->rx_target = sc->rx_max_target;
793 	}
794 
795 refill:
796 	for (i = 0; ; i++) {
797 		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
798 			break;
799 
800 		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
801 				vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);
802 
803 		id = xennet_rxidx(req_prod + i);
804 
805 		KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
806 		sc->rx_mbufs[id] = m_new;
807 
808 		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
809 		KASSERT(ref != GNTTAB_LIST_END,
810 			("reserved grant references exhausted"));
811 		sc->grant_rx_ref[id] = ref;
812 
813 		vaddr = mtod(m_new, vm_offset_t);
814 		pfn = vtophys(vaddr) >> PAGE_SHIFT;
815 		req = RING_GET_REQUEST(&sc->rx, req_prod + i);
816 
817 		gnttab_grant_foreign_access_ref(ref, otherend_id, pfn, 0);
818 		req->id = id;
819 		req->gref = ref;
820 
821 		sc->rx_pfn_array[i] =
822 		    vtophys(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
823 	}
824 
825 	KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
826 	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
827 	/*
828 	 * We may have allocated buffers which have entries outstanding
829 	 * in the page update queue -- make sure we flush those first!
830 	 */
831 	wmb();
832 
833 	/* The above is a suitable barrier to ensure the backend sees requests. */
834 	sc->rx.req_prod_pvt = req_prod + i;
835 push:
836 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
837 	if (notify)
838 		xen_intr_signal(sc->xen_intr_handle);
839 }
840 
841 static void
842 xn_rxeof(struct netfront_info *np)
843 {
844 	struct ifnet *ifp;
845 #if (defined(INET) || defined(INET6))
846 	struct lro_ctrl *lro = &np->xn_lro;
847 	struct lro_entry *queued;
848 #endif
849 	struct netfront_rx_info rinfo;
850 	struct netif_rx_response *rx = &rinfo.rx;
851 	struct netif_extra_info *extras = rinfo.extras;
852 	RING_IDX i, rp;
853 	struct mbuf *m;
854 	struct mbufq rxq, errq;
855 	int err, work_to_do;
856 
857 	do {
858 		XN_RX_LOCK_ASSERT(np);
859 		if (!netfront_carrier_ok(np))
860 			return;
861 
862 		/* XXX: there should be some sane limit. */
863 		mbufq_init(&errq, INT_MAX);
864 		mbufq_init(&rxq, INT_MAX);
865 
866 		ifp = np->xn_ifp;
867 
868 		rp = np->rx.sring->rsp_prod;
869 		rmb();	/* Ensure we see queued responses up to 'rp'. */
870 
871 		i = np->rx.rsp_cons;
872 		while (i != rp) {
873 			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
874 			memset(extras, 0, sizeof(rinfo.extras));
875 
876 			m = NULL;
877 			err = xennet_get_responses(np, &rinfo, rp, &i, &m);
878 
879 			if (__predict_false(err)) {
880 				if (m)
881 					(void)mbufq_enqueue(&errq, m);
882 				np->stats.rx_errors++;
883 				continue;
884 			}
885 
886 			m->m_pkthdr.rcvif = ifp;
887 			if (rx->flags & NETRXF_data_validated) {
888 				/* Tell the stack the checksums are okay */
889 				/*
890 				 * XXX this isn't necessarily the case - need to add
891 				 * check
892 				 */
893 
894 				m->m_pkthdr.csum_flags |=
895 					(CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
896 					    | CSUM_PSEUDO_HDR);
897 				m->m_pkthdr.csum_data = 0xffff;
898 			}
899 
900 			np->stats.rx_packets++;
901 			np->stats.rx_bytes += m->m_pkthdr.len;
902 
903 			(void)mbufq_enqueue(&rxq, m);
904 			np->rx.rsp_cons = i;
905 		}
906 
907 		mbufq_drain(&errq);
908 
909 		/*
910 		 * Process all the mbufs after the remapping is complete.
911 		 * Break the mbuf chain first though.
912 		 */
913 		while ((m = mbufq_dequeue(&rxq)) != NULL) {
914 			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
915 
916 			/*
917 			 * Do we really need to drop the rx lock?
918 			 */
919 			XN_RX_UNLOCK(np);
920 #if (defined(INET) || defined(INET6))
921 			/* Use LRO if possible */
922 			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
923 			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
924 				/*
925 				 * If LRO fails, pass up to the stack
926 				 * directly.
927 				 */
928 				(*ifp->if_input)(ifp, m);
929 			}
930 #else
931 			(*ifp->if_input)(ifp, m);
932 #endif
933 			XN_RX_LOCK(np);
934 		}
935 
936 		np->rx.rsp_cons = i;
937 
938 #if (defined(INET) || defined(INET6))
939 		/*
940 		 * Flush any outstanding LRO work
941 		 */
942 		while (!SLIST_EMPTY(&lro->lro_active)) {
943 			queued = SLIST_FIRST(&lro->lro_active);
944 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
945 			tcp_lro_flush(lro, queued);
946 		}
947 #endif
948 
949 #if 0
950 		/* If we get a callback with very few responses, reduce fill target. */
951 		/* NB. Note exponential increase, linear decrease. */
952 		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
953 			((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
954 			np->rx_target = np->rx_min_target;
955 #endif
956 
957 		network_alloc_rx_buffers(np);
958 
959 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
960 	} while (work_to_do);
961 }
962 
963 static void
964 xn_txeof(struct netfront_info *np)
965 {
966 	RING_IDX i, prod;
967 	unsigned short id;
968 	struct ifnet *ifp;
969 	netif_tx_response_t *txr;
970 	struct mbuf *m;
971 
972 	XN_TX_LOCK_ASSERT(np);
973 
974 	if (!netfront_carrier_ok(np))
975 		return;
976 
977 	ifp = np->xn_ifp;
978 
979 	do {
980 		prod = np->tx.sring->rsp_prod;
981 		rmb(); /* Ensure we see responses up to 'prod'. */
982 
983 		for (i = np->tx.rsp_cons; i != prod; i++) {
984 			txr = RING_GET_RESPONSE(&np->tx, i);
985 			if (txr->status == NETIF_RSP_NULL)
986 				continue;
987 
988 			if (txr->status != NETIF_RSP_OKAY) {
989 				printf("%s: WARNING: response is %d!\n",
990 				       __func__, txr->status);
991 			}
992 			id = txr->id;
993 			m = np->tx_mbufs[id];
994 			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
995 			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
996 				("mbuf already on the free list, but we're "
997 				"trying to free it again!"));
998 			M_ASSERTVALID(m);
999 
1000 			/*
1001 			 * Increment packet count if this is the last
1002 			 * mbuf of the chain.
1003 			 */
1004 			if (!m->m_next)
1005 				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1006 			if (__predict_false(gnttab_query_foreign_access(
1007 			    np->grant_tx_ref[id]) != 0)) {
1008 				panic("%s: grant id %u still in use by the "
1009 				    "backend", __func__, id);
1010 			}
1011 			gnttab_end_foreign_access_ref(
1012 				np->grant_tx_ref[id]);
1013 			gnttab_release_grant_reference(
1014 				&np->gref_tx_head, np->grant_tx_ref[id]);
1015 			np->grant_tx_ref[id] = GRANT_REF_INVALID;
1016 
1017 			np->tx_mbufs[id] = NULL;
1018 			add_id_to_freelist(np->tx_mbufs, id);
1019 			np->xn_cdata.xn_tx_chain_cnt--;
1020 			m_free(m);
1021 			/* At least one slot was freed, so allow transmit to be retried. */
1022 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1023 		}
1024 		np->tx.rsp_cons = prod;
1025 
1026 		/*
1027 		 * Set a new event, then check for race with update of
1028 		 * tx_cons. Note that it is essential to schedule a
1029 		 * callback, no matter how few buffers are pending. Even if
1030 		 * there is space in the transmit ring, higher layers may
1031 		 * be blocked because too much data is outstanding: in such
1032 		 * cases notification from Xen is likely to be the only kick
1033 		 * that we'll get.
1034 		 */
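		/*
		 * For example, with prod == 10 and req_prod == 14, rsp_event
		 * becomes 10 + ((14 - 10) >> 1) + 1 == 13, i.e. halfway
		 * through the currently outstanding requests.
		 */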
1035 		np->tx.sring->rsp_event =
1036 		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
1037 
1038 		mb();
1039 	} while (prod != np->tx.sring->rsp_prod);
1040 
1041 	if (np->tx_full &&
1042 	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
1043 		np->tx_full = 0;
1044 #if 0
1045 		if (np->user_state == UST_OPEN)
1046 			netif_wake_queue(dev);
1047 #endif
1048 	}
1049 }
1050 
1051 static void
1052 xn_intr(void *xsc)
1053 {
1054 	struct netfront_info *np = xsc;
1055 	struct ifnet *ifp = np->xn_ifp;
1056 
1057 #if 0
1058 	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
1059 	    likely(netfront_carrier_ok(np)) &&
1060 	    ifp->if_drv_flags & IFF_DRV_RUNNING))
1061 		return;
1062 #endif
1063 	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
1064 		XN_TX_LOCK(np);
1065 		xn_txeof(np);
1066 		XN_TX_UNLOCK(np);
1067 	}
1068 
1069 	XN_RX_LOCK(np);
1070 	xn_rxeof(np);
1071 	XN_RX_UNLOCK(np);
1072 
1073 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1074 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1075 		xn_start(ifp);
1076 }
1077 
1078 static void
1079 xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
1080 	grant_ref_t ref)
1081 {
1082 	int new = xennet_rxidx(np->rx.req_prod_pvt);
1083 
1084 	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
1085 	np->rx_mbufs[new] = m;
1086 	np->grant_rx_ref[new] = ref;
1087 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
1088 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
1089 	np->rx.req_prod_pvt++;
1090 }
1091 
1092 static int
1093 xennet_get_extras(struct netfront_info *np,
1094     struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
1095 {
1096 	struct netif_extra_info *extra;
1097 
1098 	int err = 0;
1099 
1100 	do {
1101 		struct mbuf *m;
1102 		grant_ref_t ref;
1103 
1104 		if (__predict_false(*cons + 1 == rp)) {
1105 #if 0
1106 			if (net_ratelimit())
1107 				WPRINTK("Missing extra info\n");
1108 #endif
1109 			err = EINVAL;
1110 			break;
1111 		}
1112 
1113 		extra = (struct netif_extra_info *)
1114 		    RING_GET_RESPONSE(&np->rx, ++(*cons));
1115 
1116 		if (__predict_false(!extra->type ||
1117 			extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1118 #if 0
1119 			if (net_ratelimit())
1120 				WPRINTK("Invalid extra type: %d\n",
1121 					extra->type);
1122 #endif
1123 			err = EINVAL;
1124 		} else {
1125 			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
1126 		}
1127 
1128 		m = xennet_get_rx_mbuf(np, *cons);
1129 		ref = xennet_get_rx_ref(np, *cons);
1130 		xennet_move_rx_slot(np, m, ref);
1131 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1132 
1133 	return (err);
1134 }
1135 
1136 static int
1137 xennet_get_responses(struct netfront_info *np,
1138 	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
1139 	struct mbuf  **list)
1140 {
1141 	struct netif_rx_response *rx = &rinfo->rx;
1142 	struct netif_extra_info *extras = rinfo->extras;
1143 	struct mbuf *m, *m0, *m_prev;
1144 	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
1145 	RING_IDX ref_cons = *cons;
1146 	int frags = 1;
1147 	int err = 0;
1148 	u_long ret;
1149 
1150 	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);
1151 
1152 	if (rx->flags & NETRXF_extra_info) {
1153 		err = xennet_get_extras(np, extras, rp, cons);
1154 	}
1155 
1156 	if (m0 != NULL) {
1157 		m0->m_pkthdr.len = 0;
1158 		m0->m_next = NULL;
1159 	}
1160 
1161 	for (;;) {
1162 #if 0
1163 		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
1164 			rx->status, rx->offset, frags);
1165 #endif
1166 		if (__predict_false(rx->status < 0 ||
1167 			rx->offset + rx->status > PAGE_SIZE)) {
1168 
1169 #if 0
1170 			if (net_ratelimit())
1171 				WPRINTK("rx->offset: %x, size: %u\n",
1172 					rx->offset, rx->status);
1173 #endif
1174 			xennet_move_rx_slot(np, m, ref);
1175 			if (m0 == m)
1176 				m0 = NULL;
1177 			m = NULL;
1178 			err = EINVAL;
1179 			goto next_skip_queue;
1180 		}
1181 
1182 		/*
1183 		 * This definitely indicates a bug, either in this driver or in
1184 		 * the backend driver.  In the future this should flag the bad
1185 		 * situation to the system controller so it can reboot the backend.
1186 		 */
1187 		if (ref == GRANT_REF_INVALID) {
1188 
1189 #if 0
1190 			if (net_ratelimit())
1191 				WPRINTK("Bad rx response id %d.\n", rx->id);
1192 #endif
1193 			printf("%s: Bad rx response id %d.\n", __func__, rx->id);
1194 			err = EINVAL;
1195 			goto next;
1196 		}
1197 
1198 		ret = gnttab_end_foreign_access_ref(ref);
1199 		KASSERT(ret, ("Unable to end access to grant references"));
1200 
1201 		gnttab_release_grant_reference(&np->gref_rx_head, ref);
1202 
1203 next:
1204 		if (m == NULL)
1205 			break;
1206 
1207 		m->m_len = rx->status;
1208 		m->m_data += rx->offset;
1209 		m0->m_pkthdr.len += rx->status;
1210 
1211 next_skip_queue:
1212 		if (!(rx->flags & NETRXF_more_data))
1213 			break;
1214 
1215 		if (*cons + frags == rp) {
1216 			if (net_ratelimit())
1217 				WPRINTK("Need more frags\n");
1218 			err = ENOENT;
1219 			printf("%s: cons %u frags %u rp %u, not enough frags\n",
1220 			       __func__, *cons, frags, rp);
1221 			break;
1222 		}
1223 		/*
1224 		 * Note that m can be NULL, if rx->status < 0 or if
1225 		 * rx->offset + rx->status > PAGE_SIZE above.
1226 		 */
1227 		m_prev = m;
1228 
1229 		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
1230 		m = xennet_get_rx_mbuf(np, *cons + frags);
1231 
1232 		/*
1233 		 * m_prev == NULL can happen if rx->status < 0 or if
1234 		 * rx->offset + rx->status > PAGE_SIZE above.
1235 		 */
1236 		if (m_prev != NULL)
1237 			m_prev->m_next = m;
1238 
1239 		/*
1240 		 * m0 can be NULL if rx->status < 0 or if rx->offset +
1241 		 * rx->status > PAGE_SIZE above.
1242 		 */
1243 		if (m0 == NULL)
1244 			m0 = m;
1245 		m->m_next = NULL;
1246 		ref = xennet_get_rx_ref(np, *cons + frags);
1247 		ref_cons = *cons + frags;
1248 		frags++;
1249 	}
1250 	*list = m0;
1251 	*cons += frags;
1252 
1253 	return (err);
1254 }
1255 
1256 static void
1257 xn_tick_locked(struct netfront_info *sc)
1258 {
1259 	XN_RX_LOCK_ASSERT(sc);
1260 	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
1261 
1262 	/* XXX placeholder for printing debug information */
1263 }
1264 
1265 static void
1266 xn_tick(void *xsc)
1267 {
1268 	struct netfront_info *sc;
1269 
1270 	sc = xsc;
1271 	XN_RX_LOCK(sc);
1272 	xn_tick_locked(sc);
1273 	XN_RX_UNLOCK(sc);
1274 }
1275 
1276 /**
1277  * \brief Count the number of fragments in an mbuf chain.
1278  *
1279  * Surprisingly, there isn't an M* macro for this.
1280  */
1281 static inline int
1282 xn_count_frags(struct mbuf *m)
1283 {
1284 	int nfrags;
1285 
1286 	for (nfrags = 0; m != NULL; m = m->m_next)
1287 		nfrags++;
1288 
1289 	return (nfrags);
1290 }
1291 
1292 /**
1293  * Given an mbuf chain, make sure we have enough room and then push
1294  * it onto the transmit ring.
1295  */
1296 static int
1297 xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
1298 {
1299 	struct ifnet *ifp;
1300 	struct mbuf *m;
1301 	u_int nfrags;
1302 	int otherend_id;
1303 
1304 	ifp = sc->xn_ifp;
1305 
1306 	/**
1307 	 * Defragment the mbuf if necessary.
1308 	 */
1309 	nfrags = xn_count_frags(m_head);
1310 
1311 	/*
1312 	 * Check to see whether this request is longer than netback
1313 	 * can handle, and try to defrag it.
1314 	 */
1315 	/**
1316 	 * It is a bit lame, but the netback driver in Linux can't
1317 	 * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
1318 	 * the Linux network stack.
1319 	 */
1320 	if (nfrags > sc->maxfrags) {
1321 		m = m_defrag(m_head, M_NOWAIT);
1322 		if (!m) {
1323 			/*
1324 			 * Defrag failed, so free the mbuf and
1325 			 * therefore drop the packet.
1326 			 */
1327 			m_freem(m_head);
1328 			return (EMSGSIZE);
1329 		}
1330 		m_head = m;
1331 	}
1332 
1333 	/* Determine how many fragments now exist */
1334 	nfrags = xn_count_frags(m_head);
1335 
1336 	/*
1337 	 * Check to see whether the defragmented packet has too many
1338 	 * segments for the Linux netback driver.
1339 	 */
1340 	/**
1341 	 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
1342 	 * of mbufs longer than Linux can handle.  Make sure we don't
1343 	 * pass a too-long chain over to the other side by dropping the
1344 	 * packet.  It doesn't look like there is currently a way to
1345 	 * tell the TCP stack to generate a shorter chain of packets.
1346 	 */
1347 	if (nfrags > MAX_TX_REQ_FRAGS) {
1348 #ifdef DEBUG
1349 		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
1350 		       "won't be able to handle it, dropping\n",
1351 		       __func__, nfrags, MAX_TX_REQ_FRAGS);
1352 #endif
1353 		m_freem(m_head);
1354 		return (EMSGSIZE);
1355 	}
1356 
1357 	/*
1358 	 * This check should be redundant.  We've already verified that we
1359 	 * have enough slots in the ring to handle a packet of maximum
1360 	 * size, and that our packet is less than the maximum size.  Keep
1361 	 * it in here as an assert for now just to make certain that
1362 	 * xn_tx_chain_cnt is accurate.
1363 	 */
1364 	KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
1365 		("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
1366 		 "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
1367                     (int) nfrags, (int) NET_TX_RING_SIZE));
1368 
1369 	/*
1370 	 * Start packing the mbufs in this chain into
1371 	 * the fragment pointers. Stop when we run out
1372 	 * of fragments or hit the end of the mbuf chain.
1373 	 */
1374 	/* Walk the chain, emitting one ring request per mbuf fragment. */
1375 	otherend_id = xenbus_get_otherend_id(sc->xbdev);
1376 	for (m = m_head; m; m = m->m_next) {
1377 		netif_tx_request_t *tx;
1378 		uintptr_t id;
1379 		grant_ref_t ref;
1380 		u_long mfn; /* XXX Wrong type? */
1381 
1382 		tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
1383 		id = get_id_from_freelist(sc->tx_mbufs);
1384 		if (id == 0)
1385 			panic("%s: was allocated the freelist head!\n",
1386 			    __func__);
1387 		sc->xn_cdata.xn_tx_chain_cnt++;
1388 		if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
1389 			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
1390 			    __func__);
1391 		sc->tx_mbufs[id] = m;
1392 		tx->id = id;
1393 		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
1394 		KASSERT((short)ref >= 0, ("Negative ref"));
1395 		mfn = virt_to_mfn(mtod(m, vm_offset_t));
1396 		gnttab_grant_foreign_access_ref(ref, otherend_id,
1397 		    mfn, GNTMAP_readonly);
1398 		tx->gref = sc->grant_tx_ref[id] = ref;
1399 		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
1400 		tx->flags = 0;
1401 		if (m == m_head) {
1402 			/*
1403 			 * The first fragment has the entire packet
1404 			 * size, subsequent fragments have just the
1405 			 * fragment size. The backend works out the
1406 			 * true size of the first fragment by
1407 			 * subtracting the sizes of the other
1408 			 * fragments.
1409 			 */
1410 			tx->size = m->m_pkthdr.len;
1411 
1412 			/*
1413 			 * The first fragment contains the checksum flags
1414 			 * and is optionally followed by extra data for
1415 			 * TSO etc.
1416 			 */
1417 			/**
1418 			 * CSUM_TSO requires checksum offloading.
1419 			 * Some versions of FreeBSD fail to
1420 			 * set CSUM_TCP in the CSUM_TSO case,
1421 			 * so we have to test for CSUM_TSO
1422 			 * explicitly.
1423 			 */
1424 			if (m->m_pkthdr.csum_flags
1425 			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
1426 				tx->flags |= (NETTXF_csum_blank
1427 				    | NETTXF_data_validated);
1428 			}
1429 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1430 				struct netif_extra_info *gso =
1431 					(struct netif_extra_info *)
1432 					RING_GET_REQUEST(&sc->tx,
1433 							 ++sc->tx.req_prod_pvt);
1434 
1435 				tx->flags |= NETTXF_extra_info;
1436 
1437 				gso->u.gso.size = m->m_pkthdr.tso_segsz;
1438 				gso->u.gso.type =
1439 					XEN_NETIF_GSO_TYPE_TCPV4;
1440 				gso->u.gso.pad = 0;
1441 				gso->u.gso.features = 0;
1442 
1443 				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
1444 				gso->flags = 0;
1445 			}
1446 		} else {
1447 			tx->size = m->m_len;
1448 		}
1449 		if (m->m_next)
1450 			tx->flags |= NETTXF_more_data;
1451 
1452 		sc->tx.req_prod_pvt++;
1453 	}
1454 	BPF_MTAP(ifp, m_head);
1455 
1456 	sc->stats.tx_bytes += m_head->m_pkthdr.len;
1457 	sc->stats.tx_packets++;
1458 
1459 	return (0);
1460 }
1461 
1462 static void
1463 xn_start_locked(struct ifnet *ifp)
1464 {
1465 	struct netfront_info *sc;
1466 	struct mbuf *m_head;
1467 	int notify;
1468 
1469 	sc = ifp->if_softc;
1470 
1471 	if (!netfront_carrier_ok(sc))
1472 		return;
1473 
1474 	/*
1475 	 * While we have enough transmit slots available for at least one
1476 	 * maximum-sized packet, pull mbufs off the queue and put them on
1477 	 * the transmit ring.
1478 	 */
1479 	while (xn_tx_slot_available(sc)) {
1480 		IF_DEQUEUE(&ifp->if_snd, m_head);
1481 		if (m_head == NULL)
1482 			break;
1483 
1484 		if (xn_assemble_tx_request(sc, m_head) != 0)
1485 			break;
1486 	}
1487 
1488 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
1489 	if (notify)
1490 		xen_intr_signal(sc->xen_intr_handle);
1491 
1492 	if (RING_FULL(&sc->tx)) {
1493 		sc->tx_full = 1;
1494 #if 0
1495 		netif_stop_queue(dev);
1496 #endif
1497 	}
1498 }
1499 
1500 static void
1501 xn_start(struct ifnet *ifp)
1502 {
1503 	struct netfront_info *sc;
1504 	sc = ifp->if_softc;
1505 	XN_TX_LOCK(sc);
1506 	xn_start_locked(ifp);
1507 	XN_TX_UNLOCK(sc);
1508 }
1509 
1510 /* equivalent of network_open() in Linux */
1511 static void
1512 xn_ifinit_locked(struct netfront_info *sc)
1513 {
1514 	struct ifnet *ifp;
1515 
1516 	XN_LOCK_ASSERT(sc);
1517 
1518 	ifp = sc->xn_ifp;
1519 
1520 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1521 		return;
1522 
1523 	xn_stop(sc);
1524 
1525 	network_alloc_rx_buffers(sc);
1526 	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;
1527 
1528 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1529 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1530 	if_link_state_change(ifp, LINK_STATE_UP);
1531 
1532 	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
1533 }
1534 
1535 static void
1536 xn_ifinit(void *xsc)
1537 {
1538 	struct netfront_info *sc = xsc;
1539 
1540 	XN_LOCK(sc);
1541 	xn_ifinit_locked(sc);
1542 	XN_UNLOCK(sc);
1543 }
1544 
1545 static int
1546 xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1547 {
1548 	struct netfront_info *sc = ifp->if_softc;
1549 	struct ifreq *ifr = (struct ifreq *) data;
1550 #ifdef INET
1551 	struct ifaddr *ifa = (struct ifaddr *)data;
1552 #endif
1553 
1554 	int mask, error = 0;
1555 	switch (cmd) {
1556 	case SIOCSIFADDR:
1557 #ifdef INET
1558 		XN_LOCK(sc);
1559 		if (ifa->ifa_addr->sa_family == AF_INET) {
1560 			ifp->if_flags |= IFF_UP;
1561 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1562 				xn_ifinit_locked(sc);
1563 			arp_ifinit(ifp, ifa);
1564 			XN_UNLOCK(sc);
1565 		} else {
1566 			XN_UNLOCK(sc);
1567 #endif
1568 			error = ether_ioctl(ifp, cmd, data);
1569 #ifdef INET
1570 		}
1571 #endif
1572 		break;
1573 	case SIOCSIFMTU:
1574 		/* XXX can we alter the MTU on a VN? */
1575 #ifdef notyet
1576 		if (ifr->ifr_mtu > XN_JUMBO_MTU)
1577 			error = EINVAL;
1578 		else
1579 #endif
1580 		{
1581 			ifp->if_mtu = ifr->ifr_mtu;
1582 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1583 			xn_ifinit(sc);
1584 		}
1585 		break;
1586 	case SIOCSIFFLAGS:
1587 		XN_LOCK(sc);
1588 		if (ifp->if_flags & IFF_UP) {
1589 			/*
1590 			 * If only the state of the PROMISC flag changed,
1591 			 * then just use the 'set promisc mode' command
1592 			 * instead of reinitializing the entire NIC. Doing
1593 			 * a full re-init means reloading the firmware and
1594 			 * waiting for it to start up, which may take a
1595 			 * second or two.
1596 			 */
1597 #ifdef notyet
1598 			/* No promiscuous mode with Xen */
1599 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1600 			    ifp->if_flags & IFF_PROMISC &&
1601 			    !(sc->xn_if_flags & IFF_PROMISC)) {
1602 				XN_SETBIT(sc, XN_RX_MODE,
1603 					  XN_RXMODE_RX_PROMISC);
1604 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1605 				   !(ifp->if_flags & IFF_PROMISC) &&
1606 				   sc->xn_if_flags & IFF_PROMISC) {
1607 				XN_CLRBIT(sc, XN_RX_MODE,
1608 					  XN_RXMODE_RX_PROMISC);
1609 			} else
1610 #endif
1611 				xn_ifinit_locked(sc);
1612 		} else {
1613 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1614 				xn_stop(sc);
1615 			}
1616 		}
1617 		sc->xn_if_flags = ifp->if_flags;
1618 		XN_UNLOCK(sc);
1619 		error = 0;
1620 		break;
1621 	case SIOCSIFCAP:
1622 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1623 		if (mask & IFCAP_TXCSUM) {
1624 			if (IFCAP_TXCSUM & ifp->if_capenable) {
1625 				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
1626 				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
1627 				    | CSUM_IP | CSUM_TSO);
1628 			} else {
1629 				ifp->if_capenable |= IFCAP_TXCSUM;
1630 				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
1631 				    | CSUM_IP);
1632 			}
1633 		}
1634 		if (mask & IFCAP_RXCSUM) {
1635 			ifp->if_capenable ^= IFCAP_RXCSUM;
1636 		}
1637 		if (mask & IFCAP_TSO4) {
1638 			if (IFCAP_TSO4 & ifp->if_capenable) {
1639 				ifp->if_capenable &= ~IFCAP_TSO4;
1640 				ifp->if_hwassist &= ~CSUM_TSO;
1641 			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
1642 				ifp->if_capenable |= IFCAP_TSO4;
1643 				ifp->if_hwassist |= CSUM_TSO;
1644 			} else {
1645 				IPRINTK("Xen requires tx checksum offload"
1646 				    " be enabled to use TSO\n");
1647 				error = EINVAL;
1648 			}
1649 		}
1650 		if (mask & IFCAP_LRO) {
1651 			ifp->if_capenable ^= IFCAP_LRO;
1652 
1653 		}
1654 		error = 0;
1655 		break;
1656 	case SIOCADDMULTI:
1657 	case SIOCDELMULTI:
1658 #ifdef notyet
1659 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1660 			XN_LOCK(sc);
1661 			xn_setmulti(sc);
1662 			XN_UNLOCK(sc);
1663 			error = 0;
1664 		}
1665 #endif
1666 		/* FALLTHROUGH */
1667 	case SIOCSIFMEDIA:
1668 	case SIOCGIFMEDIA:
1669 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1670 		break;
1671 	default:
1672 		error = ether_ioctl(ifp, cmd, data);
1673 	}
1674 
1675 	return (error);
1676 }
1677 
1678 static void
1679 xn_stop(struct netfront_info *sc)
1680 {
1681 	struct ifnet *ifp;
1682 
1683 	XN_LOCK_ASSERT(sc);
1684 
1685 	ifp = sc->xn_ifp;
1686 
1687 	callout_stop(&sc->xn_stat_ch);
1688 
1689 	xn_free_rx_ring(sc);
1690 	xn_free_tx_ring(sc);
1691 
1692 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1693 	if_link_state_change(ifp, LINK_STATE_DOWN);
1694 }
1695 
1696 /* START of Xenolinux helper functions adapted to FreeBSD */
1697 int
1698 network_connect(struct netfront_info *np)
1699 {
1700 	int i, requeue_idx, error;
1701 	grant_ref_t ref;
1702 	netif_rx_request_t *req;
1703 	u_int feature_rx_copy;
1704 
1705 	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1706 	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
1707 	if (error)
1708 		feature_rx_copy = 0;
1709 
1710 	/* We only support rx copy. */
1711 	if (!feature_rx_copy)
1712 		return (EPROTONOSUPPORT);
1713 
1714 	/* Recovery procedure: */
1715 	error = talk_to_backend(np->xbdev, np);
1716 	if (error)
1717 		return (error);
1718 
1719 	/* Step 1: Reinitialise variables. */
1720 	xn_query_features(np);
1721 	xn_configure_features(np);
1722 	netif_release_tx_bufs(np);
1723 
1724 	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1725 	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1726 		struct mbuf *m;
1727 		u_long pfn;
1728 
1729 		if (np->rx_mbufs[i] == NULL)
1730 			continue;
1731 
1732 		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
1733 		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1734 
1735 		req = RING_GET_REQUEST(&np->rx, requeue_idx);
1736 		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
1737 
1738 		gnttab_grant_foreign_access_ref(ref,
1739 		    xenbus_get_otherend_id(np->xbdev),
1740 		    pfn, 0);
1741 
1742 		req->gref = ref;
1743 		req->id   = requeue_idx;
1744 
1745 		requeue_idx++;
1746 	}
1747 
1748 	np->rx.req_prod_pvt = requeue_idx;
1749 
1750 	/* Step 3: All public and private state should now be sane.  Get
1751 	 * ready to start sending and receiving packets and give the driver
1752 	 * domain a kick because we've probably just requeued some
1753 	 * packets.
1754 	 */
1755 	netfront_carrier_on(np);
1756 	xen_intr_signal(np->xen_intr_handle);
1757 	XN_TX_LOCK(np);
1758 	xn_txeof(np);
1759 	XN_TX_UNLOCK(np);
1760 	network_alloc_rx_buffers(np);
1761 
1762 	return (0);
1763 }
1764 
1765 static void
1766 xn_query_features(struct netfront_info *np)
1767 {
1768 	int val;
1769 
1770 	device_printf(np->xbdev, "backend features:");
1771 
1772 	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1773 		"feature-sg", NULL, "%d", &val) < 0)
1774 		val = 0;
1775 
1776 	np->maxfrags = 1;
1777 	if (val) {
1778 		np->maxfrags = MAX_TX_REQ_FRAGS;
1779 		printf(" feature-sg");
1780 	}
1781 
1782 	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1783 		"feature-gso-tcpv4", NULL, "%d", &val) < 0)
1784 		val = 0;
1785 
1786 	np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
1787 	if (val) {
1788 		np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
1789 		printf(" feature-gso-tcp4");
1790 	}
1791 
1792 	printf("\n");
1793 }
1794 
1795 static int
1796 xn_configure_features(struct netfront_info *np)
1797 {
1798 	int err, cap_enabled;
1799 
1800 	err = 0;
1801 
1802 	if (np->xn_resume &&
1803 	    ((np->xn_ifp->if_capenable & np->xn_ifp->if_capabilities)
1804 	    == np->xn_ifp->if_capenable)) {
1805 		/* Current options are available, no need to do anything. */
1806 		return (0);
1807 	}
1808 
1809 	/* Try to preserve as many options as possible. */
1810 	if (np->xn_resume)
1811 		cap_enabled = np->xn_ifp->if_capenable;
1812 	else
1813 		cap_enabled = UINT_MAX;
1814 
1815 #if (defined(INET) || defined(INET6))
1816 	if ((np->xn_ifp->if_capenable & IFCAP_LRO) == (cap_enabled & IFCAP_LRO))
1817 		tcp_lro_free(&np->xn_lro);
1818 #endif
1819 	np->xn_ifp->if_capenable =
1820 	    np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4) & cap_enabled;
1821 	np->xn_ifp->if_hwassist &= ~CSUM_TSO;
1822 #if (defined(INET) || defined(INET6))
1823 	if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) ==
1824 	    (cap_enabled & IFCAP_LRO)) {
1825 		err = tcp_lro_init(&np->xn_lro);
1826 		if (err) {
1827 			device_printf(np->xbdev, "LRO initialization failed\n");
1828 		} else {
1829 			np->xn_lro.ifp = np->xn_ifp;
1830 			np->xn_ifp->if_capenable |= IFCAP_LRO;
1831 		}
1832 	}
1833 	if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) ==
1834 	    (cap_enabled & IFCAP_TSO4)) {
1835 		np->xn_ifp->if_capenable |= IFCAP_TSO4;
1836 		np->xn_ifp->if_hwassist |= CSUM_TSO;
1837 	}
1838 #endif
1839 	return (err);
1840 }
1841 
1842 /**
1843  * Create a network device.
1844  * @param dev  Newbus device representing this virtual NIC.
1845  */
1846 int
1847 create_netdev(device_t dev)
1848 {
1849 	int i;
1850 	struct netfront_info *np;
1851 	int err;
1852 	struct ifnet *ifp;
1853 
1854 	np = device_get_softc(dev);
1855 
1856 	np->xbdev         = dev;
1857 
1858 	mtx_init(&np->tx_lock, "xntx", "netfront transmit lock", MTX_DEF);
1859 	mtx_init(&np->rx_lock, "xnrx", "netfront receive lock", MTX_DEF);
1860 	mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);
1861 
1862 	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
1863 	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
1864 	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);
1865 
1866 	np->rx_target     = RX_MIN_TARGET;
1867 	np->rx_min_target = RX_MIN_TARGET;
1868 	np->rx_max_target = RX_MAX_TARGET;
1869 
1870 	/* Initialise {tx,rx}_mbufs to be a free chain containing every entry. */
1871 	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
1872 		np->tx_mbufs[i] = (void *) ((u_long) i+1);
1873 		np->grant_tx_ref[i] = GRANT_REF_INVALID;
1874 	}
1875 	np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;
1876 
1877 	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
1878 
1879 		np->rx_mbufs[i] = NULL;
1880 		np->grant_rx_ref[i] = GRANT_REF_INVALID;
1881 	}
1882 
1883 	mbufq_init(&np->xn_rx_batch, INT_MAX);
1884 
1885 	/* A grant for every tx ring slot */
1886 	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1887 					  &np->gref_tx_head) != 0) {
1888 		IPRINTK("#### netfront can't alloc tx grant refs\n");
1889 		err = ENOMEM;
1890 		goto error;
1891 	}
1892 	/* A grant for every rx ring slot */
1893 	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1894 					  &np->gref_rx_head) != 0) {
1895 		WPRINTK("#### netfront can't alloc rx grant refs\n");
1896 		gnttab_free_grant_references(np->gref_tx_head);
1897 		err = ENOMEM;
1898 		goto error;
1899 	}
1900 
1901 	err = xen_net_read_mac(dev, np->mac);
1902 	if (err) {
1903 		gnttab_free_grant_references(np->gref_rx_head);
1904 		gnttab_free_grant_references(np->gref_tx_head);
1905 		goto error;
1906 	}
1907 
1908 	/* Set up ifnet structure */
1909 	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
1910 	ifp->if_softc = np;
1911 	if_initname(ifp, "xn", device_get_unit(dev));
1912 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1913 	ifp->if_ioctl = xn_ioctl;
1914 	ifp->if_start = xn_start;
1915 #ifdef notyet
1916 	ifp->if_watchdog = xn_watchdog;
1917 #endif
1918 	ifp->if_init = xn_ifinit;
1919 	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;
1920 
1921 	ifp->if_hwassist = XN_CSUM_FEATURES;
1922 	ifp->if_capabilities = IFCAP_HWCSUM;
1923 	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1924 	ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
1925 	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1926 
1927 	ether_ifattach(ifp, np->mac);
1928 	callout_init(&np->xn_stat_ch, 1);
1929 	netfront_carrier_off(np);
1930 
1931 	return (0);
1932 
1933 error:
1934 	KASSERT(err != 0, ("Error path with no error code specified"));
1935 	return (err);
1936 }
1937 
1938 /**
1939  * Handle the change of state of the backend to Closing.  We must delete our
1940  * device-layer structures now, to ensure that writes are flushed through to
1941  * the backend.  Once this is done, we can switch to Closed in
1942  * acknowledgement.
1943  */
1944 #if 0
1945 static void
1946 netfront_closing(device_t dev)
1947 {
1948 #if 0
1949 	struct netfront_info *info = dev->dev_driver_data;
1950 
1951 	DPRINTK("netfront_closing: %s removed\n", dev->nodename);
1952 
1953 	close_netdev(info);
1954 #endif
1955 	xenbus_switch_state(dev, XenbusStateClosed);
1956 }
1957 #endif
1958 
1959 static int
1960 netfront_detach(device_t dev)
1961 {
1962 	struct netfront_info *info = device_get_softc(dev);
1963 
1964 	DPRINTK("%s\n", xenbus_get_node(dev));
1965 
1966 	netif_free(info);
1967 
1968 	return (0);
1969 }
1970 
1971 static void
1972 netif_free(struct netfront_info *info)
1973 {
1974 	XN_LOCK(info);
1975 	xn_stop(info);
1976 	XN_UNLOCK(info);
1977 	callout_drain(&info->xn_stat_ch);
1978 	netif_disconnect_backend(info);
1979 	if (info->xn_ifp != NULL) {
1980 		ether_ifdetach(info->xn_ifp);
1981 		if_free(info->xn_ifp);
1982 		info->xn_ifp = NULL;
1983 	}
1984 	ifmedia_removeall(&info->sc_media);
1985 }
1986 
1987 static void
1988 netif_disconnect_backend(struct netfront_info *info)
1989 {
1990 	XN_RX_LOCK(info);
1991 	XN_TX_LOCK(info);
1992 	netfront_carrier_off(info);
1993 	XN_TX_UNLOCK(info);
1994 	XN_RX_UNLOCK(info);
1995 
1996 	free_ring(&info->tx_ring_ref, &info->tx.sring);
1997 	free_ring(&info->rx_ring_ref, &info->rx.sring);
1998 
1999 	xen_intr_unbind(&info->xen_intr_handle);
2000 }
2001 
2002 static void
2003 free_ring(int *ref, void *ring_ptr_ref)
2004 {
2005 	void **ring_ptr_ptr = ring_ptr_ref;
2006 
2007 	if (*ref != GRANT_REF_INVALID) {
2008 		/* This API frees the associated storage. */
2009 		gnttab_end_foreign_access(*ref, *ring_ptr_ptr);
2010 		*ref = GRANT_REF_INVALID;
2011 	}
2012 	*ring_ptr_ptr = NULL;
2013 }
2014 
2015 static int
2016 xn_ifmedia_upd(struct ifnet *ifp)
2017 {
2018 	return (0);
2019 }
2020 
2021 static void
2022 xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2023 {
2024 	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
2025 	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2026 }
2027 
2028 /* ** Driver registration ** */
2029 static device_method_t netfront_methods[] = {
2030 	/* Device interface */
2031 	DEVMETHOD(device_probe,         netfront_probe),
2032 	DEVMETHOD(device_attach,        netfront_attach),
2033 	DEVMETHOD(device_detach,        netfront_detach),
2034 	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
2035 	DEVMETHOD(device_suspend,       netfront_suspend),
2036 	DEVMETHOD(device_resume,        netfront_resume),
2037 
2038 	/* Xenbus interface */
2039 	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),
2040 
2041 	DEVMETHOD_END
2042 };
2043 
2044 static driver_t netfront_driver = {
2045 	"xn",
2046 	netfront_methods,
2047 	sizeof(struct netfront_info),
2048 };
2049 devclass_t netfront_devclass;
2050 
2051 DRIVER_MODULE(netfront, xenbusb_front, netfront_driver, netfront_devclass, NULL,
2052     NULL);
2053