xref: /freebsd/sys/dev/xen/netfront/netfront.c (revision 64de80195bba295c961a4cdf96dbe0e4979bdf2a)
1 /*-
2  * Copyright (c) 2004-2006 Kip Macy
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_inet.h"
31 #include "opt_inet6.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/kernel.h>
40 #include <sys/socket.h>
41 #include <sys/sysctl.h>
42 #include <sys/queue.h>
43 #include <sys/lock.h>
44 #include <sys/sx.h>
45 
46 #include <net/if.h>
47 #include <net/if_var.h>
48 #include <net/if_arp.h>
49 #include <net/ethernet.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 
53 #include <net/bpf.h>
54 
55 #include <net/if_types.h>
56 
57 #include <netinet/in_systm.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/if_ether.h>
61 #if __FreeBSD_version >= 700000
62 #include <netinet/tcp.h>
63 #include <netinet/tcp_lro.h>
64 #endif
65 
66 #include <vm/vm.h>
67 #include <vm/pmap.h>
68 
69 #include <machine/clock.h>      /* for DELAY */
70 #include <machine/bus.h>
71 #include <machine/resource.h>
72 #include <machine/frame.h>
73 #include <machine/vmparam.h>
74 
75 #include <sys/bus.h>
76 #include <sys/rman.h>
77 
78 #include <machine/intr_machdep.h>
79 
80 #include <xen/xen-os.h>
81 #include <xen/hypervisor.h>
82 #include <xen/xen_intr.h>
83 #include <xen/gnttab.h>
84 #include <xen/interface/memory.h>
85 #include <xen/interface/io/netif.h>
86 #include <xen/xenbus/xenbusvar.h>
87 
88 #include <machine/xen/xenvar.h>
89 
90 #include <dev/xen/netfront/mbufq.h>
91 
92 #include "xenbus_if.h"
93 
94 /* Features supported by all backends.  TSO and LRO can be negotiated */
95 #define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
96 
97 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
98 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
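/*
 * Editor's note (illustrative, assuming the standard 4 KB page size and the
 * usual netif ring entry layouts): __RING_SIZE() selects the largest power
 * of two worth of entries that fits in a single shared page, so both ring
 * sizes above work out to 256 entries.
 */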
99 
100 #if __FreeBSD_version >= 700000
101 /*
102  * Should the driver do LRO on the RX end?
103  *  This can be toggled on the fly, but the
104  *  interface must be reset (down/up) for it
105  *  to take effect.
106  */
107 static int xn_enable_lro = 1;
108 TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
109 #else
110 
111 #define IFCAP_TSO4	0
112 #define CSUM_TSO	0
113 
114 #endif
115 
116 #ifdef CONFIG_XEN
117 static int MODPARM_rx_copy = 0;
118 module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
119 MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
120 static int MODPARM_rx_flip = 0;
121 module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
122 MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
123 #else
124 static const int MODPARM_rx_copy = 1;
125 static const int MODPARM_rx_flip = 0;
126 #endif
127 
128 /**
129  * \brief The maximum allowed data fragments in a single transmit
130  *        request.
131  *
132  * This limit is imposed by the backend driver.  We assume here that
133  * we are dealing with a Linux driver domain and have set our limit
134  * to mirror the Linux MAX_SKB_FRAGS constant.
135  */
136 #define	MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
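/*
 * Editor's note (worked example): with 4 KB pages this evaluates to
 * 65536 / 4096 + 2 = 18 fragments -- enough for a maximally sized 64 KB
 * TSO payload that straddles a page boundary at both ends, mirroring the
 * historical Linux MAX_SKB_FRAGS value.
 */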
137 
138 #define RX_COPY_THRESHOLD 256
139 
140 #define net_ratelimit() 0
141 
142 struct netfront_info;
143 struct netfront_rx_info;
144 
145 static void xn_txeof(struct netfront_info *);
146 static void xn_rxeof(struct netfront_info *);
147 static void network_alloc_rx_buffers(struct netfront_info *);
148 
149 static void xn_tick_locked(struct netfront_info *);
150 static void xn_tick(void *);
151 
152 static void xn_intr(void *);
153 static inline int xn_count_frags(struct mbuf *m);
154 static int  xn_assemble_tx_request(struct netfront_info *sc,
155 				   struct mbuf *m_head);
156 static void xn_start_locked(struct ifnet *);
157 static void xn_start(struct ifnet *);
158 static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
159 static void xn_ifinit_locked(struct netfront_info *);
160 static void xn_ifinit(void *);
161 static void xn_stop(struct netfront_info *);
162 static void xn_query_features(struct netfront_info *np);
163 static int  xn_configure_features(struct netfront_info *np);
164 #ifdef notyet
165 static void xn_watchdog(struct ifnet *);
166 #endif
167 
168 #ifdef notyet
169 static void netfront_closing(device_t dev);
170 #endif
171 static void netif_free(struct netfront_info *info);
172 static int netfront_detach(device_t dev);
173 
174 static int talk_to_backend(device_t dev, struct netfront_info *info);
175 static int create_netdev(device_t dev);
176 static void netif_disconnect_backend(struct netfront_info *info);
177 static int setup_device(device_t dev, struct netfront_info *info);
178 static void free_ring(int *ref, void *ring_ptr_ref);
179 
180 static int  xn_ifmedia_upd(struct ifnet *ifp);
181 static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
182 
183 /* Xenolinux helper functions */
184 int network_connect(struct netfront_info *);
185 
186 static void xn_free_rx_ring(struct netfront_info *);
187 
188 static void xn_free_tx_ring(struct netfront_info *);
189 
190 static int xennet_get_responses(struct netfront_info *np,
191 	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
192 	struct mbuf **list, int *pages_flipped_p);
193 
194 #define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
195 
196 #define INVALID_P2M_ENTRY (~0UL)
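/*
 * Editor's note: on classic (non-auto-translated) PV Xen, guest-physical
 * frame numbers (PFNs) differ from host machine frame numbers (MFNs);
 * virt_to_mfn() resolves a kernel virtual address to the MFN that grant
 * table and memory-op hypercalls expect.
 */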
197 
198 /*
199  * Mbuf pointers. We need these to keep track of the virtual addresses
200  * of our mbuf chains since we can only convert from virtual to physical,
201  * not the other way around.  The size must track the free index arrays.
202  */
203 struct xn_chain_data {
204 	struct mbuf    *xn_tx_chain[NET_TX_RING_SIZE+1];
205 	int		xn_tx_chain_cnt;
206 	struct mbuf    *xn_rx_chain[NET_RX_RING_SIZE+1];
207 };
208 
209 struct net_device_stats
210 {
211 	u_long	rx_packets;		/* total packets received	*/
212 	u_long	tx_packets;		/* total packets transmitted	*/
213 	u_long	rx_bytes;		/* total bytes received 	*/
214 	u_long	tx_bytes;		/* total bytes transmitted	*/
215 	u_long	rx_errors;		/* bad packets received		*/
216 	u_long	tx_errors;		/* packet transmit problems	*/
217 	u_long	rx_dropped;		/* no space in linux buffers	*/
218 	u_long	tx_dropped;		/* no space available in linux	*/
219 	u_long	multicast;		/* multicast packets received	*/
220 	u_long	collisions;
221 
222 	/* detailed rx_errors: */
223 	u_long	rx_length_errors;
224 	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
225 	u_long	rx_crc_errors;		/* recved pkt with crc error	*/
226 	u_long	rx_frame_errors;	/* recv'd frame alignment error */
227 	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
228 	u_long	rx_missed_errors;	/* receiver missed packet	*/
229 
230 	/* detailed tx_errors */
231 	u_long	tx_aborted_errors;
232 	u_long	tx_carrier_errors;
233 	u_long	tx_fifo_errors;
234 	u_long	tx_heartbeat_errors;
235 	u_long	tx_window_errors;
236 
237 	/* for cslip etc */
238 	u_long	rx_compressed;
239 	u_long	tx_compressed;
240 };
241 
242 struct netfront_info {
243 	struct ifnet *xn_ifp;
244 #if __FreeBSD_version >= 700000
245 	struct lro_ctrl xn_lro;
246 #endif
247 
248 	struct net_device_stats stats;
249 	u_int tx_full;
250 
251 	netif_tx_front_ring_t tx;
252 	netif_rx_front_ring_t rx;
253 
254 	struct mtx   tx_lock;
255 	struct mtx   rx_lock;
256 	struct mtx   sc_lock;
257 
258 	xen_intr_handle_t xen_intr_handle;
259 	u_int copying_receiver;
260 	u_int carrier;
261 	u_int maxfrags;
262 
263 	/* Receive-ring batched refills. */
264 #define RX_MIN_TARGET 32
265 #define RX_MAX_TARGET NET_RX_RING_SIZE
266 	int rx_min_target;
267 	int rx_max_target;
268 	int rx_target;
269 
270 	grant_ref_t gref_tx_head;
271 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
272 	grant_ref_t gref_rx_head;
273 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];
274 
275 	device_t		xbdev;
276 	int			tx_ring_ref;
277 	int			rx_ring_ref;
278 	uint8_t			mac[ETHER_ADDR_LEN];
279 	struct xn_chain_data	xn_cdata;	/* mbufs */
280 	struct mbuf_head	xn_rx_batch;	/* head of the batch queue */
281 
282 	int			xn_if_flags;
283 	struct callout	        xn_stat_ch;
284 
285 	u_long			rx_pfn_array[NET_RX_RING_SIZE];
286 	multicall_entry_t	rx_mcl[NET_RX_RING_SIZE+1];
287 	mmu_update_t		rx_mmu[NET_RX_RING_SIZE];
288 	struct ifmedia		sc_media;
289 };
290 
291 #define rx_mbufs xn_cdata.xn_rx_chain
292 #define tx_mbufs xn_cdata.xn_tx_chain
293 
294 #define XN_LOCK_INIT(_sc, _name) \
295         mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
296         mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
297         mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF)
298 
299 #define XN_RX_LOCK(_sc)           mtx_lock(&(_sc)->rx_lock)
300 #define XN_RX_UNLOCK(_sc)         mtx_unlock(&(_sc)->rx_lock)
301 
302 #define XN_TX_LOCK(_sc)           mtx_lock(&(_sc)->tx_lock)
303 #define XN_TX_UNLOCK(_sc)         mtx_unlock(&(_sc)->tx_lock)
304 
305 #define XN_LOCK(_sc)           mtx_lock(&(_sc)->sc_lock)
306 #define XN_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_lock)
307 
308 #define XN_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->sc_lock, MA_OWNED)
309 #define XN_RX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->rx_lock, MA_OWNED)
310 #define XN_TX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->tx_lock, MA_OWNED)
311 #define XN_LOCK_DESTROY(_sc)   do { mtx_destroy(&(_sc)->rx_lock); \
312                                mtx_destroy(&(_sc)->tx_lock);      \
313                                mtx_destroy(&(_sc)->sc_lock); } while (0)
314 
315 struct netfront_rx_info {
316 	struct netif_rx_response rx;
317 	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
318 };
319 
320 #define netfront_carrier_on(netif)	((netif)->carrier = 1)
321 #define netfront_carrier_off(netif)	((netif)->carrier = 0)
322 #define netfront_carrier_ok(netif)	((netif)->carrier)
323 
324 /* Helpers for acquiring and freeing slots in the tx/rx mbuf free lists. */
325 
326 static inline void
327 add_id_to_freelist(struct mbuf **list, uintptr_t id)
328 {
329 	KASSERT(id != 0,
330 		("%s: the head item (0) must always be free.", __func__));
331 	list[id] = list[0];
332 	list[0]  = (struct mbuf *)id;
333 }
334 
335 static inline unsigned short
336 get_id_from_freelist(struct mbuf **list)
337 {
338 	uintptr_t id;
339 
340 	id = (uintptr_t)list[0];
341 	KASSERT(id != 0,
342 		("%s: the head item (0) must always remain free.", __func__));
343 	list[0] = list[id];
344 	return (id);
345 }
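/*
 * Editor's note (illustrative example): the free list is threaded through
 * the mbuf pointer array itself, with slot 0 as the head; small integer
 * values stored in the array are free-slot indices cast to (struct mbuf *),
 * not real pointers.  Freeing slot 3 and then slot 1 into a list whose head
 * value was X leaves:
 *
 *	list[0] == (struct mbuf *)1	(head -> slot 1)
 *	list[1] == (struct mbuf *)3	(slot 1 -> slot 3)
 *	list[3] == X			(previous head value)
 *
 * after which get_id_from_freelist() returns 1 and leaves slot 3 at the
 * head.  This is also why real mbuf pointers must compare greater than
 * NET_TX_RING_SIZE (see netif_release_tx_bufs() below).
 */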
346 
347 static inline int
348 xennet_rxidx(RING_IDX idx)
349 {
350 	return (idx & (NET_RX_RING_SIZE - 1));
351 }
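/*
 * Editor's note: this relies on NET_RX_RING_SIZE being a power of two, so
 * the mask is equivalent to idx % NET_RX_RING_SIZE; e.g. with a 256-entry
 * ring, xennet_rxidx(257) == 1.
 */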
352 
353 static inline struct mbuf *
354 xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
355 {
356 	int i = xennet_rxidx(ri);
357 	struct mbuf *m;
358 
359 	m = np->rx_mbufs[i];
360 	np->rx_mbufs[i] = NULL;
361 	return (m);
362 }
363 
364 static inline grant_ref_t
365 xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
366 {
367 	int i = xennet_rxidx(ri);
368 	grant_ref_t ref = np->grant_rx_ref[i];
369 	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
370 	np->grant_rx_ref[i] = GRANT_REF_INVALID;
371 	return (ref);
372 }
373 
374 #define IPRINTK(fmt, args...) \
375     printf("[XEN] " fmt, ##args)
376 #ifdef INVARIANTS
377 #define WPRINTK(fmt, args...) \
378     printf("[XEN] " fmt, ##args)
379 #else
380 #define WPRINTK(fmt, args...)
381 #endif
382 #ifdef DEBUG
383 #define DPRINTK(fmt, args...) \
384     printf("[XEN] %s: " fmt, __func__, ##args)
385 #else
386 #define DPRINTK(fmt, args...)
387 #endif
388 
389 /**
390  * Read the 'mac' node at the given device's node in the store, and parse that
391  * as colon-separated octets, placing the result in the given mac array.  mac
392  * must be a preallocated array of length ETHER_ADDR_LEN.
393  * Return 0 on success, or errno on error.
394  */
395 static int
396 xen_net_read_mac(device_t dev, uint8_t mac[])
397 {
398 	int error, i;
399 	char *s, *e, *macstr;
400 	const char *path;
401 
402 	path = xenbus_get_node(dev);
403 	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
404 	if (error == ENOENT) {
405 		/*
406 		 * Deal with missing mac XenStore nodes on devices with
407 		 * HVM emulation (the 'ioemu' configuration attribute)
408 		 * enabled.
409 		 *
410 		 * The HVM emulator may execute in a stub device model
411 		 * domain which lacks the permission, only given to Dom0,
412 		 * to update the guest's XenStore tree.  For this reason,
413 		 * the HVM emulator doesn't even attempt to write the
414 		 * front-side mac node, even when operating in Dom0.
415 		 * However, there should always be a mac listed in the
416 		 * backend tree.  Fall back to that copy if our query
417 		 * of the front side XenStore location doesn't find
418 		 * anything.
419 		 */
420 		path = xenbus_get_otherend_path(dev);
421 		error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
422 	}
423 	if (error != 0) {
424 		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
425 		return (error);
426 	}
427 
428 	s = macstr;
429 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
430 		mac[i] = strtoul(s, &e, 16);
431 		if (s == e || (e[0] != ':' && e[0] != 0)) {
432 			free(macstr, M_XENBUS);
433 			return (ENOENT);
434 		}
435 		s = &e[1];
436 	}
437 	free(macstr, M_XENBUS);
438 	return (0);
439 }
440 
441 /**
442  * Entry point to this code when a new device is created.  Claim the
443  * device if it is a XenBus "vif" node.  Allocation of the rings and
444  * negotiation with the backend happen later, in netfront_attach() and
445  * talk_to_backend().
446  */
447 static int
448 netfront_probe(device_t dev)
449 {
450 
451 	if (!strcmp(xenbus_get_type(dev), "vif")) {
452 		device_set_desc(dev, "Virtual Network Interface");
453 		return (0);
454 	}
455 
456 	return (ENXIO);
457 }
458 
459 static int
460 netfront_attach(device_t dev)
461 {
462 	int err;
463 
464 	err = create_netdev(dev);
465 	if (err) {
466 		xenbus_dev_fatal(dev, err, "creating netdev");
467 		return (err);
468 	}
469 
470 #if __FreeBSD_version >= 700000
471 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
472 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
473 	    OID_AUTO, "enable_lro", CTLFLAG_RW,
474 	    &xn_enable_lro, 0, "Large Receive Offload");
475 #endif
476 
477 	return (0);
478 }
479 
480 static int
481 netfront_suspend(device_t dev)
482 {
483 	struct netfront_info *info = device_get_softc(dev);
484 
485 	XN_RX_LOCK(info);
486 	XN_TX_LOCK(info);
487 	netfront_carrier_off(info);
488 	XN_TX_UNLOCK(info);
489 	XN_RX_UNLOCK(info);
490 	return (0);
491 }
492 
493 /**
494  * We are reconnecting to the backend, due to a suspend/resume, or a backend
495  * driver restart.  We tear down our netif structure and recreate it, but
496  * leave the device-layer structures intact so that this is transparent to the
497  * rest of the kernel.
498  */
499 static int
500 netfront_resume(device_t dev)
501 {
502 	struct netfront_info *info = device_get_softc(dev);
503 
504 	netif_disconnect_backend(info);
505 	return (0);
506 }
507 
508 /* Common code used when first setting up, and when resuming. */
509 static int
510 talk_to_backend(device_t dev, struct netfront_info *info)
511 {
512 	const char *message;
513 	struct xs_transaction xst;
514 	const char *node = xenbus_get_node(dev);
515 	int err;
516 
517 	err = xen_net_read_mac(dev, info->mac);
518 	if (err) {
519 		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
520 		goto out;
521 	}
522 
523 	/* Create shared ring, alloc event channel. */
524 	err = setup_device(dev, info);
525 	if (err)
526 		goto out;
527 
528  again:
529 	err = xs_transaction_start(&xst);
530 	if (err) {
531 		xenbus_dev_fatal(dev, err, "starting transaction");
532 		goto destroy_ring;
533 	}
534 	err = xs_printf(xst, node, "tx-ring-ref", "%u",
535 			info->tx_ring_ref);
536 	if (err) {
537 		message = "writing tx ring-ref";
538 		goto abort_transaction;
539 	}
540 	err = xs_printf(xst, node, "rx-ring-ref", "%u",
541 			info->rx_ring_ref);
542 	if (err) {
543 		message = "writing rx ring-ref";
544 		goto abort_transaction;
545 	}
546 	err = xs_printf(xst, node,
547 			"event-channel", "%u",
548 			xen_intr_port(info->xen_intr_handle));
549 	if (err) {
550 		message = "writing event-channel";
551 		goto abort_transaction;
552 	}
553 	err = xs_printf(xst, node, "request-rx-copy", "%u",
554 			info->copying_receiver);
555 	if (err) {
556 		message = "writing request-rx-copy";
557 		goto abort_transaction;
558 	}
559 	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
560 	if (err) {
561 		message = "writing feature-rx-notify";
562 		goto abort_transaction;
563 	}
564 	err = xs_printf(xst, node, "feature-sg", "%d", 1);
565 	if (err) {
566 		message = "writing feature-sg";
567 		goto abort_transaction;
568 	}
569 #if __FreeBSD_version >= 700000
570 	err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
571 	if (err) {
572 		message = "writing feature-gso-tcpv4";
573 		goto abort_transaction;
574 	}
575 #endif
576 
577 	err = xs_transaction_end(xst, 0);
578 	if (err) {
579 		if (err == EAGAIN)
580 			goto again;
581 		xenbus_dev_fatal(dev, err, "completing transaction");
582 		goto destroy_ring;
583 	}
584 
585 	return (0);
586 
587  abort_transaction:
588 	xs_transaction_end(xst, 1);
589 	xenbus_dev_fatal(dev, err, "%s", message);
590  destroy_ring:
591 	netif_free(info);
592  out:
593 	return (err);
594 }
595 
596 static int
597 setup_device(device_t dev, struct netfront_info *info)
598 {
599 	netif_tx_sring_t *txs;
600 	netif_rx_sring_t *rxs;
601 	int error;
602 	struct ifnet *ifp;
603 
604 	ifp = info->xn_ifp;
605 
606 	info->tx_ring_ref = GRANT_REF_INVALID;
607 	info->rx_ring_ref = GRANT_REF_INVALID;
608 	info->rx.sring = NULL;
609 	info->tx.sring = NULL;
610 
611 	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
612 	if (!txs) {
613 		error = ENOMEM;
614 		xenbus_dev_fatal(dev, error, "allocating tx ring page");
615 		goto fail;
616 	}
617 	SHARED_RING_INIT(txs);
618 	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
619 	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
620 	if (error)
621 		goto fail;
622 
623 	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
624 	if (!rxs) {
625 		error = ENOMEM;
626 		xenbus_dev_fatal(dev, error, "allocating rx ring page");
627 		goto fail;
628 	}
629 	SHARED_RING_INIT(rxs);
630 	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
631 
632 	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
633 	if (error)
634 		goto fail;
635 
636 	error = xen_intr_alloc_and_bind_local_port(dev,
637 	    xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info,
638 	    INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle);
639 
640 	if (error) {
641 		xenbus_dev_fatal(dev, error,
642 				 "xen_intr_alloc_and_bind_local_port failed");
643 		goto fail;
644 	}
645 
646 	return (0);
647 
648  fail:
649 	netif_free(info);
650 	return (error);
651 }
652 
653 #ifdef INET
654 /**
655  * If this interface has an IPv4 address, send a gratuitous ARP for it.
656  * This helps to get the network going again after a migration.
657  */
658 static void
659 netfront_send_fake_arp(device_t dev, struct netfront_info *info)
660 {
661 	struct ifnet *ifp;
662 	struct ifaddr *ifa;
663 
664 	ifp = info->xn_ifp;
665 	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
666 		if (ifa->ifa_addr->sa_family == AF_INET) {
667 			arp_ifinit(ifp, ifa);
668 		}
669 	}
670 }
671 #endif
672 
673 /**
674  * Callback received when the backend's state changes.
675  */
676 static void
677 netfront_backend_changed(device_t dev, XenbusState newstate)
678 {
679 	struct netfront_info *sc = device_get_softc(dev);
680 
681 	DPRINTK("newstate=%d\n", newstate);
682 
683 	switch (newstate) {
684 	case XenbusStateInitialising:
685 	case XenbusStateInitialised:
686 	case XenbusStateConnected:
687 	case XenbusStateUnknown:
688 	case XenbusStateClosed:
689 	case XenbusStateReconfigured:
690 	case XenbusStateReconfiguring:
691 		break;
692 	case XenbusStateInitWait:
693 		if (xenbus_get_state(dev) != XenbusStateInitialising)
694 			break;
695 		if (network_connect(sc) != 0)
696 			break;
697 		xenbus_set_state(dev, XenbusStateConnected);
698 #ifdef INET
699 		netfront_send_fake_arp(dev, sc);
700 #endif
701 		break;
702 	case XenbusStateClosing:
703 		xenbus_set_state(dev, XenbusStateClosed);
704 		break;
705 	}
706 }
707 
708 static void
709 xn_free_rx_ring(struct netfront_info *sc)
710 {
711 #if 0
712 	int i;
713 
714 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
715 		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
716 			m_freem(sc->rx_mbufs[i]);
717 			sc->rx_mbufs[i] = NULL;
718 		}
719 	}
720 
721 	sc->rx.rsp_cons = 0;
722 	sc->xn_rx_if->req_prod = 0;
723 	sc->xn_rx_if->event = sc->rx.rsp_cons ;
724 #endif
725 }
726 
727 static void
728 xn_free_tx_ring(struct netfront_info *sc)
729 {
730 #if 0
731 	int i;
732 
733 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
734 		if (sc->tx_mbufs[i] != NULL) {
735 			m_freem(sc->tx_mbufs[i]);
736 			sc->xn_cdata.xn_tx_chain[i] = NULL;
737 		}
738 	}
739 
740 	return;
741 #endif
742 }
743 
744 /**
745  * \brief Verify that there is sufficient space in the Tx ring
746  *        buffer for a maximally sized request to be enqueued.
747  *
748  * A transmit request requires a transmit descriptor for each packet
749  * fragment, plus up to 2 entries for "options" (e.g. TSO).
750  */
751 static inline int
752 xn_tx_slot_available(struct netfront_info *np)
753 {
754 	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
755 }
756 
757 static void
758 netif_release_tx_bufs(struct netfront_info *np)
759 {
760 	int i;
761 
762 	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
763 		struct mbuf *m;
764 
765 		m = np->tx_mbufs[i];
766 
767 		/*
768 		 * We assume that no kernel addresses are
769 		 * less than NET_TX_RING_SIZE.  Any entry
770 		 * in the table that is below this number
771 		 * must be an index from free-list tracking.
772 		 */
773 		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
774 			continue;
775 		gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
776 		gnttab_release_grant_reference(&np->gref_tx_head,
777 		    np->grant_tx_ref[i]);
778 		np->grant_tx_ref[i] = GRANT_REF_INVALID;
779 		add_id_to_freelist(np->tx_mbufs, i);
780 		np->xn_cdata.xn_tx_chain_cnt--;
781 		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
782 			panic("%s: tx_chain_cnt must be >= 0", __func__);
783 		}
784 		m_free(m);
785 	}
786 }
787 
788 static void
789 network_alloc_rx_buffers(struct netfront_info *sc)
790 {
791 	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
792 	unsigned short id;
793 	struct mbuf *m_new;
794 	int i, batch_target, notify;
795 	RING_IDX req_prod;
796 	struct xen_memory_reservation reservation;
797 	grant_ref_t ref;
798 	int nr_flips;
799 	netif_rx_request_t *req;
800 	vm_offset_t vaddr;
801 	u_long pfn;
802 
803 	req_prod = sc->rx.req_prod_pvt;
804 
805 	if (__predict_false(sc->carrier == 0))
806 		return;
807 
808 	/*
809 	 * Allocate mbufs greedily, even though we batch updates to the
810 	 * receive ring. This creates a less bursty demand on the memory
811 	 * allocator, and so should reduce the chance of failed allocation
812 	 * requests both for ourselves and for other kernel subsystems.
813 	 *
814 	 * Here we attempt to maintain rx_target buffers in flight, counting
815 	 * buffers that we have yet to process in the receive ring.
816 	 */
817 	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
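	/*
	 * Editor's note (worked example): (req_prod - sc->rx.rsp_cons) above
	 * counts buffers already posted to the ring but not yet consumed, so
	 * batch_target is the shortfall from rx_target; e.g. with rx_target
	 * at 64 and 40 buffers outstanding we try to allocate 24 more.
	 */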
818 	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
819 		MGETHDR(m_new, M_NOWAIT, MT_DATA);
820 		if (m_new == NULL) {
821 			printf("%s: MGETHDR failed\n", __func__);
822 			goto no_mbuf;
823 		}
824 
825 		if (m_cljget(m_new, M_NOWAIT, MJUMPAGESIZE) == NULL) {
826 			printf("%s: m_cljget failed\n", __func__);
827 			m_freem(m_new);
828 
829 no_mbuf:
830 			if (i != 0)
831 				goto refill;
832 			/*
833 			 * XXX set timer
834 			 */
835 			break;
836 		}
837 		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;
838 
839 		/* queue the mbufs allocated */
840 		mbufq_tail(&sc->xn_rx_batch, m_new);
841 	}
842 
843 	/*
844 	 * If we've allocated at least half of our target number of entries,
845 	 * submit them to the backend - we have enough to make the overhead
846 	 * of submission worthwhile.  Otherwise wait for more mbufs and
847 	 * request entries to become available.
848 	 */
849 	if (i < (sc->rx_target/2)) {
850 		if (req_prod > sc->rx.sring->req_prod)
851 			goto push;
852 		return;
853 	}
854 
855 	/*
856 	 * Double the floating fill target if we risked having the backend
857 	 * run out of empty buffers for receive traffic.  We define "running
858 	 * low" as having less than a fourth of our target buffers free
859 	 * at the time we refilled the queue.
860 	 */
861 	if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
862 		sc->rx_target *= 2;
863 		if (sc->rx_target > sc->rx_max_target)
864 			sc->rx_target = sc->rx_max_target;
865 	}
866 
867 refill:
868 	for (nr_flips = i = 0; ; i++) {
869 		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
870 			break;
871 
872 		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
873 				vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);
874 
875 		id = xennet_rxidx(req_prod + i);
876 
877 		KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
878 		sc->rx_mbufs[id] = m_new;
879 
880 		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
881 		KASSERT(ref != GNTTAB_LIST_END,
882 			("reserved grant references exhausted"));
883 		sc->grant_rx_ref[id] = ref;
884 
885 		vaddr = mtod(m_new, vm_offset_t);
886 		pfn = vtophys(vaddr) >> PAGE_SHIFT;
887 		req = RING_GET_REQUEST(&sc->rx, req_prod + i);
888 
889 		if (sc->copying_receiver == 0) {
890 			gnttab_grant_foreign_transfer_ref(ref,
891 			    otherend_id, pfn);
892 			sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
893 			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
894 				/* Remove this page before passing
895 				 * back to Xen.
896 				 */
897 				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
898 				MULTI_update_va_mapping(&sc->rx_mcl[i],
899 				    vaddr, 0, 0);
900 			}
901 			nr_flips++;
902 		} else {
903 			gnttab_grant_foreign_access_ref(ref,
904 			    otherend_id,
905 			    PFNTOMFN(pfn), 0);
906 		}
907 		req->id = id;
908 		req->gref = ref;
909 
910 		sc->rx_pfn_array[i] =
911 		    vtomach(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
912 	}
913 
914 	KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
915 	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
916 	/*
917 	 * We may have allocated buffers which have entries outstanding
918 	 * in the page update queue -- make sure we flush those first!
919 	 */
920 	PT_UPDATES_FLUSH();
921 	if (nr_flips != 0) {
922 #ifdef notyet
923 		/* Tell the balloon driver what is going on. */
924 		balloon_update_driver_allowance(i);
925 #endif
926 		set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
927 		reservation.nr_extents   = i;
928 		reservation.extent_order = 0;
929 		reservation.address_bits = 0;
930 		reservation.domid        = DOMID_SELF;
931 
932 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
933 			/* After all PTEs have been zapped, flush the TLB. */
934 			sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
935 			    UVMF_TLB_FLUSH|UVMF_ALL;
936 
937 			/* Give away a batch of pages. */
938 			sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
939 			sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
940 			sc->rx_mcl[i].args[1] =  (u_long)&reservation;
941 			/* Zap PTEs and give away pages in one big multicall. */
942 			(void)HYPERVISOR_multicall(sc->rx_mcl, i+1);
943 
944 			if (__predict_false(sc->rx_mcl[i].result != i ||
945 			    HYPERVISOR_memory_op(XENMEM_decrease_reservation,
946 			    &reservation) != i))
947 				panic("%s: unable to reduce memory "
948 				    "reservation\n", __func__);
949 		}
950 	} else {
951 		wmb();
952 	}
953 
954 	/* Above is a suitable barrier to ensure backend will see requests. */
955 	sc->rx.req_prod_pvt = req_prod + i;
956 push:
957 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
958 	if (notify)
959 		xen_intr_signal(sc->xen_intr_handle);
960 }
961 
962 static void
963 xn_rxeof(struct netfront_info *np)
964 {
965 	struct ifnet *ifp;
966 #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
967 	struct lro_ctrl *lro = &np->xn_lro;
968 	struct lro_entry *queued;
969 #endif
970 	struct netfront_rx_info rinfo;
971 	struct netif_rx_response *rx = &rinfo.rx;
972 	struct netif_extra_info *extras = rinfo.extras;
973 	RING_IDX i, rp;
974 	multicall_entry_t *mcl;
975 	struct mbuf *m;
976 	struct mbuf_head rxq, errq;
977 	int err, pages_flipped = 0, work_to_do;
978 
979 	do {
980 		XN_RX_LOCK_ASSERT(np);
981 		if (!netfront_carrier_ok(np))
982 			return;
983 
984 		mbufq_init(&errq);
985 		mbufq_init(&rxq);
986 
987 		ifp = np->xn_ifp;
988 
989 		rp = np->rx.sring->rsp_prod;
990 		rmb();	/* Ensure we see queued responses up to 'rp'. */
991 
992 		i = np->rx.rsp_cons;
993 		while (i != rp) {
994 			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
995 			memset(extras, 0, sizeof(rinfo.extras));
996 
997 			m = NULL;
998 			err = xennet_get_responses(np, &rinfo, rp, &i, &m,
999 			    &pages_flipped);
1000 
1001 			if (__predict_false(err)) {
1002 				if (m)
1003 					mbufq_tail(&errq, m);
1004 				np->stats.rx_errors++;
1005 				continue;
1006 			}
1007 
1008 			m->m_pkthdr.rcvif = ifp;
1009 			if (rx->flags & NETRXF_data_validated) {
1010 				/*
1011 				 * Tell the stack the checksums are okay.
1012 				 * XXX this isn't necessarily the case -
1013 				 * we need to add a real check here.
1014 				 */
1015 
1016 				m->m_pkthdr.csum_flags |=
1017 					(CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
1018 					    | CSUM_PSEUDO_HDR);
1019 				m->m_pkthdr.csum_data = 0xffff;
1020 			}
1021 
1022 			np->stats.rx_packets++;
1023 			np->stats.rx_bytes += m->m_pkthdr.len;
1024 
1025 			mbufq_tail(&rxq, m);
1026 			np->rx.rsp_cons = i;
1027 		}
1028 
1029 		if (pages_flipped) {
1030 			/* Some pages are no longer absent... */
1031 #ifdef notyet
1032 			balloon_update_driver_allowance(-pages_flipped);
1033 #endif
1034 			/* Do all the remapping work, and M->P updates, in one big
1035 			 * hypercall.
1036 			 */
1037 			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1038 				mcl = np->rx_mcl + pages_flipped;
1039 				mcl->op = __HYPERVISOR_mmu_update;
1040 				mcl->args[0] = (u_long)np->rx_mmu;
1041 				mcl->args[1] = pages_flipped;
1042 				mcl->args[2] = 0;
1043 				mcl->args[3] = DOMID_SELF;
1044 				(void)HYPERVISOR_multicall(np->rx_mcl,
1045 				    pages_flipped + 1);
1046 			}
1047 		}
1048 
1049 		while ((m = mbufq_dequeue(&errq)))
1050 			m_freem(m);
1051 
1052 		/*
1053 		 * Process all the mbufs after the remapping is complete.
1054 		 * Break the mbuf chain first though.
1055 		 */
1056 		while ((m = mbufq_dequeue(&rxq)) != NULL) {
1057 			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1058 
1059 			/*
1060 			 * Do we really need to drop the rx lock?
1061 			 */
1062 			XN_RX_UNLOCK(np);
1063 #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
1064 			/* Use LRO if possible */
1065 			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
1066 			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
1067 				/*
1068 				 * If LRO fails, pass up to the stack
1069 				 * directly.
1070 				 */
1071 				(*ifp->if_input)(ifp, m);
1072 			}
1073 #else
1074 			(*ifp->if_input)(ifp, m);
1075 #endif
1076 			XN_RX_LOCK(np);
1077 		}
1078 
1079 		np->rx.rsp_cons = i;
1080 
1081 #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
1082 		/*
1083 		 * Flush any outstanding LRO work
1084 		 */
1085 		while (!SLIST_EMPTY(&lro->lro_active)) {
1086 			queued = SLIST_FIRST(&lro->lro_active);
1087 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
1088 			tcp_lro_flush(lro, queued);
1089 		}
1090 #endif
1091 
1092 #if 0
1093 		/* If we get a callback with very few responses, reduce fill target. */
1094 		/* NB. Note exponential increase, linear decrease. */
1095 		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1096 			((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
1097 			np->rx_target = np->rx_min_target;
1098 #endif
1099 
1100 		network_alloc_rx_buffers(np);
1101 
1102 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
1103 	} while (work_to_do);
1104 }
1105 
1106 static void
1107 xn_txeof(struct netfront_info *np)
1108 {
1109 	RING_IDX i, prod;
1110 	unsigned short id;
1111 	struct ifnet *ifp;
1112 	netif_tx_response_t *txr;
1113 	struct mbuf *m;
1114 
1115 	XN_TX_LOCK_ASSERT(np);
1116 
1117 	if (!netfront_carrier_ok(np))
1118 		return;
1119 
1120 	ifp = np->xn_ifp;
1121 
1122 	do {
1123 		prod = np->tx.sring->rsp_prod;
1124 		rmb(); /* Ensure we see responses up to 'prod'. */
1125 
1126 		for (i = np->tx.rsp_cons; i != prod; i++) {
1127 			txr = RING_GET_RESPONSE(&np->tx, i);
1128 			if (txr->status == NETIF_RSP_NULL)
1129 				continue;
1130 
1131 			if (txr->status != NETIF_RSP_OKAY) {
1132 				printf("%s: WARNING: response is %d!\n",
1133 				       __func__, txr->status);
1134 			}
1135 			id = txr->id;
1136 			m = np->tx_mbufs[id];
1137 			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
1138 			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
1139 				("mbuf already on the free list, but we're "
1140 				"trying to free it again!"));
1141 			M_ASSERTVALID(m);
1142 
1143 			/*
1144 			 * Increment packet count if this is the last
1145 			 * mbuf of the chain.
1146 			 */
1147 			if (!m->m_next)
1148 				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1149 			if (__predict_false(gnttab_query_foreign_access(
1150 			    np->grant_tx_ref[id]) != 0)) {
1151 				panic("%s: grant id %u still in use by the "
1152 				    "backend", __func__, id);
1153 			}
1154 			gnttab_end_foreign_access_ref(
1155 				np->grant_tx_ref[id]);
1156 			gnttab_release_grant_reference(
1157 				&np->gref_tx_head, np->grant_tx_ref[id]);
1158 			np->grant_tx_ref[id] = GRANT_REF_INVALID;
1159 
1160 			np->tx_mbufs[id] = NULL;
1161 			add_id_to_freelist(np->tx_mbufs, id);
1162 			np->xn_cdata.xn_tx_chain_cnt--;
1163 			m_free(m);
1164 			/* At least one slot is now free, so clear OACTIVE to allow new transmits. */
1165 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1166 		}
1167 		np->tx.rsp_cons = prod;
1168 
1169 		/*
1170 		 * Set a new event, then check for race with update of
1171 		 * tx_cons. Note that it is essential to schedule a
1172 		 * callback, no matter how few buffers are pending. Even if
1173 		 * there is space in the transmit ring, higher layers may
1174 		 * be blocked because too much data is outstanding: in such
1175 		 * cases notification from Xen is likely to be the only kick
1176 		 * that we'll get.
1177 		 */
1178 		np->tx.sring->rsp_event =
1179 		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
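		/*
		 * Editor's note (worked example): the threshold above lands
		 * halfway between the last response processed and the last
		 * request posted.  With prod == 100 and req_prod == 140 it
		 * requests an event at 100 + (40 >> 1) + 1 = 121, batching
		 * completion interrupts instead of taking one per packet.
		 */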
1180 
1181 		mb();
1182 	} while (prod != np->tx.sring->rsp_prod);
1183 
1184 	if (np->tx_full &&
1185 	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
1186 		np->tx_full = 0;
1187 #if 0
1188 		if (np->user_state == UST_OPEN)
1189 			netif_wake_queue(dev);
1190 #endif
1191 	}
1192 }
1193 
1194 static void
1195 xn_intr(void *xsc)
1196 {
1197 	struct netfront_info *np = xsc;
1198 	struct ifnet *ifp = np->xn_ifp;
1199 
1200 #if 0
1201 	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
1202 	    likely(netfront_carrier_ok(np)) &&
1203 	    ifp->if_drv_flags & IFF_DRV_RUNNING))
1204 		return;
1205 #endif
1206 	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
1207 		XN_TX_LOCK(np);
1208 		xn_txeof(np);
1209 		XN_TX_UNLOCK(np);
1210 	}
1211 
1212 	XN_RX_LOCK(np);
1213 	xn_rxeof(np);
1214 	XN_RX_UNLOCK(np);
1215 
1216 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1217 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1218 		xn_start(ifp);
1219 }
1220 
1221 static void
1222 xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
1223 	grant_ref_t ref)
1224 {
1225 	int new = xennet_rxidx(np->rx.req_prod_pvt);
1226 
1227 	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
1228 	np->rx_mbufs[new] = m;
1229 	np->grant_rx_ref[new] = ref;
1230 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
1231 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
1232 	np->rx.req_prod_pvt++;
1233 }
1234 
1235 static int
1236 xennet_get_extras(struct netfront_info *np,
1237     struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
1238 {
1239 	struct netif_extra_info *extra;
1240 
1241 	int err = 0;
1242 
1243 	do {
1244 		struct mbuf *m;
1245 		grant_ref_t ref;
1246 
1247 		if (__predict_false(*cons + 1 == rp)) {
1248 #if 0
1249 			if (net_ratelimit())
1250 				WPRINTK("Missing extra info\n");
1251 #endif
1252 			err = EINVAL;
1253 			break;
1254 		}
1255 
1256 		extra = (struct netif_extra_info *)
1257 		    RING_GET_RESPONSE(&np->rx, ++(*cons));
1258 
1259 		if (__predict_false(!extra->type ||
1260 			extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1261 #if 0
1262 			if (net_ratelimit())
1263 				WPRINTK("Invalid extra type: %d\n",
1264 					extra->type);
1265 #endif
1266 			err = EINVAL;
1267 		} else {
1268 			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
1269 		}
1270 
1271 		m = xennet_get_rx_mbuf(np, *cons);
1272 		ref = xennet_get_rx_ref(np, *cons);
1273 		xennet_move_rx_slot(np, m, ref);
1274 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1275 
1276 	return err;
1277 	return (err);
1278 
1279 static int
1280 xennet_get_responses(struct netfront_info *np,
1281 	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
1282 	struct mbuf  **list,
1283 	int *pages_flipped_p)
1284 {
1285 	int pages_flipped = *pages_flipped_p;
1286 	struct mmu_update *mmu;
1287 	struct multicall_entry *mcl;
1288 	struct netif_rx_response *rx = &rinfo->rx;
1289 	struct netif_extra_info *extras = rinfo->extras;
1290 	struct mbuf *m, *m0, *m_prev;
1291 	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
1292 	RING_IDX ref_cons = *cons;
1293 	int frags = 1;
1294 	int err = 0;
1295 	u_long ret;
1296 
1297 	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);
1298 
1299 	if (rx->flags & NETRXF_extra_info) {
1300 		err = xennet_get_extras(np, extras, rp, cons);
1301 	}
1302 
1303 	if (m0 != NULL) {
1304 		m0->m_pkthdr.len = 0;
1305 		m0->m_next = NULL;
1306 	}
1307 
1308 	for (;;) {
1309 		u_long mfn;
1310 
1311 #if 0
1312 		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
1313 			rx->status, rx->offset, frags);
1314 #endif
1315 		if (__predict_false(rx->status < 0 ||
1316 			rx->offset + rx->status > PAGE_SIZE)) {
1317 
1318 #if 0
1319 			if (net_ratelimit())
1320 				WPRINTK("rx->offset: %x, size: %u\n",
1321 					rx->offset, rx->status);
1322 #endif
1323 			xennet_move_rx_slot(np, m, ref);
1324 			if (m0 == m)
1325 				m0 = NULL;
1326 			m = NULL;
1327 			err = EINVAL;
1328 			goto next_skip_queue;
1329 		}
1330 
1331 		/*
1332 		 * This definitely indicates a bug, either in this driver or in
1333 		 * the backend driver. In future this should flag the bad
1334 		 * situation to the system controller to reboot the backend.
1335 		 */
1336 		if (ref == GRANT_REF_INVALID) {
1337 
1338 #if 0
1339 			if (net_ratelimit())
1340 				WPRINTK("Bad rx response id %d.\n", rx->id);
1341 #endif
1342 			printf("%s: Bad rx response id %d.\n", __func__, rx->id);
1343 			err = EINVAL;
1344 			goto next;
1345 		}
1346 
1347 		if (!np->copying_receiver) {
1348 			/* Memory pressure, insufficient buffer
1349 			 * headroom, ...
1350 			 */
1351 			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
1352 				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
1353 					rx->id, rx->status);
1354 				xennet_move_rx_slot(np, m, ref);
1355 				err = ENOMEM;
1356 				goto next;
1357 			}
1358 
1359 			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1360 				/* Remap the page. */
1361 				void *vaddr = mtod(m, void *);
1362 				uint32_t pfn;
1363 
1364 				mcl = np->rx_mcl + pages_flipped;
1365 				mmu = np->rx_mmu + pages_flipped;
1366 
1367 				MULTI_update_va_mapping(mcl, (u_long)vaddr,
1368 				    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
1369 				    PG_V | PG_M | PG_A, 0);
1370 				pfn = (uintptr_t)m->m_ext.ext_arg1;
1371 				mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
1372 				    MMU_MACHPHYS_UPDATE;
1373 				mmu->val = pfn;
1374 
1375 				set_phys_to_machine(pfn, mfn);
1376 			}
1377 			pages_flipped++;
1378 		} else {
1379 			ret = gnttab_end_foreign_access_ref(ref);
1380 			KASSERT(ret, ("gnttab_end_foreign_access_ref failed"));
1381 		}
1382 
1383 		gnttab_release_grant_reference(&np->gref_rx_head, ref);
1384 
1385 next:
1386 		if (m == NULL)
1387 			break;
1388 
1389 		m->m_len = rx->status;
1390 		m->m_data += rx->offset;
1391 		m0->m_pkthdr.len += rx->status;
1392 
1393 next_skip_queue:
1394 		if (!(rx->flags & NETRXF_more_data))
1395 			break;
1396 
1397 		if (*cons + frags == rp) {
1398 			if (net_ratelimit())
1399 				WPRINTK("Need more frags\n");
1400 			err = ENOENT;
1401 			printf("%s: cons %u frags %u rp %u, not enough frags\n",
1402 			       __func__, *cons, frags, rp);
1403 			break;
1404 		}
1405 		/*
1406 		 * Note that m can be NULL, if rx->status < 0 or if
1407 		 * rx->offset + rx->status > PAGE_SIZE above.
1408 		 */
1409 		m_prev = m;
1410 
1411 		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
1412 		m = xennet_get_rx_mbuf(np, *cons + frags);
1413 
1414 		/*
1415 		 * m_prev == NULL can happen if rx->status < 0 or if
1416 		 * rx->offset + rx->status > PAGE_SIZE above.
1417 		 */
1418 		if (m_prev != NULL)
1419 			m_prev->m_next = m;
1420 
1421 		/*
1422 		 * m0 can be NULL if rx->status < 0 or if rx->offset +
1423 		 * rx->status > PAGE_SIZE above.
1424 		 */
1425 		if (m0 == NULL)
1426 			m0 = m;
1427 		m->m_next = NULL;
1428 		ref = xennet_get_rx_ref(np, *cons + frags);
1429 		ref_cons = *cons + frags;
1430 		frags++;
1431 	}
1432 	*list = m0;
1433 	*cons += frags;
1434 	*pages_flipped_p = pages_flipped;
1435 
1436 	return (err);
1437 }
1438 
1439 static void
1440 xn_tick_locked(struct netfront_info *sc)
1441 {
1442 	XN_RX_LOCK_ASSERT(sc);
1443 	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
1444 
1445 	/* XXX placeholder for printing debug information */
1446 }
1447 
1448 static void
1449 xn_tick(void *xsc)
1450 {
1451 	struct netfront_info *sc;
1452 
1453 	sc = xsc;
1454 	XN_RX_LOCK(sc);
1455 	xn_tick_locked(sc);
1456 	XN_RX_UNLOCK(sc);
1457 }
1458 
1459 /**
1460  * \brief Count the number of fragments in an mbuf chain.
1461  *
1462  * Surprisingly, there isn't an M* macro for this.
1463  */
1464 static inline int
1465 xn_count_frags(struct mbuf *m)
1466 {
1467 	int nfrags;
1468 
1469 	for (nfrags = 0; m != NULL; m = m->m_next)
1470 		nfrags++;
1471 
1472 	return (nfrags);
1473 }
1474 
1475 /**
1476  * Given an mbuf chain, make sure we have enough room and then push
1477  * it onto the transmit ring.
1478  */
1479 static int
1480 xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
1481 {
1482 	struct ifnet *ifp;
1483 	struct mbuf *m;
1484 	u_int nfrags;
1485 	netif_extra_info_t *extra;
1486 	int otherend_id;
1487 
1488 	ifp = sc->xn_ifp;
1489 
1490 	/**
1491 	 * Defragment the mbuf if necessary.
1492 	 */
1493 	nfrags = xn_count_frags(m_head);
1494 
1495 	/*
1496 	 * Check to see whether this request is longer than netback
1497 	 * can handle, and try to defrag it.
1498 	 */
1499 	/**
1500 	 * It is a bit lame, but the netback driver in Linux can't
1501 	 * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
1502 	 * the Linux network stack.
1503 	 */
1504 	if (nfrags > sc->maxfrags) {
1505 		m = m_defrag(m_head, M_NOWAIT);
1506 		if (!m) {
1507 			/*
1508 			 * Defrag failed, so free the mbuf and
1509 			 * therefore drop the packet.
1510 			 */
1511 			m_freem(m_head);
1512 			return (EMSGSIZE);
1513 		}
1514 		m_head = m;
1515 	}
1516 
1517 	/* Determine how many fragments now exist */
1518 	nfrags = xn_count_frags(m_head);
1519 
1520 	/*
1521 	 * Check to see whether the defragmented packet has too many
1522 	 * segments for the Linux netback driver.
1523 	 */
1524 	/**
1525 	 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
1526 	 * of mbufs longer than Linux can handle.  Make sure we don't
1527 	 * pass a too-long chain over to the other side by dropping the
1528 	 * packet.  It doesn't look like there is currently a way to
1529 	 * tell the TCP stack to generate a shorter chain of packets.
1530 	 */
1531 	if (nfrags > MAX_TX_REQ_FRAGS) {
1532 #ifdef DEBUG
1533 		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
1534 		       "won't be able to handle it, dropping\n",
1535 		       __func__, nfrags, MAX_TX_REQ_FRAGS);
1536 #endif
1537 		m_freem(m_head);
1538 		return (EMSGSIZE);
1539 	}
1540 
1541 	/*
1542 	 * This check should be redundant.  We've already verified that we
1543 	 * have enough slots in the ring to handle a packet of maximum
1544 	 * size, and that our packet is less than the maximum size.  Keep
1545 	 * it in here as an assert for now just to make certain that
1546 	 * xn_tx_chain_cnt is accurate.
1547 	 */
1548 	KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
1549 		("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
1550 		 "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
1551                     (int) nfrags, (int) NET_TX_RING_SIZE));
1552 
1553 	/*
1554 	 * Start packing the mbufs in this chain into
1555 	 * the fragment pointers. Stop when we run out
1556 	 * of fragments or hit the end of the mbuf chain.
1557 	 */
1558 	m = m_head;
1559 	extra = NULL;
1560 	otherend_id = xenbus_get_otherend_id(sc->xbdev);
1561 	for (m = m_head; m; m = m->m_next) {
1562 		netif_tx_request_t *tx;
1563 		uintptr_t id;
1564 		grant_ref_t ref;
1565 		u_long mfn; /* XXX Wrong type? */
1566 
1567 		tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
1568 		id = get_id_from_freelist(sc->tx_mbufs);
1569 		if (id == 0)
1570 			panic("%s: was allocated the freelist head!\n",
1571 			    __func__);
1572 		sc->xn_cdata.xn_tx_chain_cnt++;
1573 		if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
1574 			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
1575 			    __func__);
1576 		sc->tx_mbufs[id] = m;
1577 		tx->id = id;
1578 		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
1579 		KASSERT((short)ref >= 0, ("Negative ref"));
1580 		mfn = virt_to_mfn(mtod(m, vm_offset_t));
1581 		gnttab_grant_foreign_access_ref(ref, otherend_id,
1582 		    mfn, GNTMAP_readonly);
1583 		tx->gref = sc->grant_tx_ref[id] = ref;
1584 		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
1585 		tx->flags = 0;
1586 		if (m == m_head) {
1587 			/*
1588 			 * The first fragment has the entire packet
1589 			 * size, subsequent fragments have just the
1590 			 * fragment size. The backend works out the
1591 			 * true size of the first fragment by
1592 			 * subtracting the sizes of the other
1593 			 * fragments.
1594 			 */
1595 			tx->size = m->m_pkthdr.len;
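			/*
			 * Editor's note (worked example): a 3000-byte packet
			 * split 2000 + 1000 over two mbufs is posted with
			 * size 3000 in the first request and 1000 in the
			 * second; the backend recovers the first fragment's
			 * true length as 3000 - 1000 = 2000.
			 */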
1596 
1597 			/*
1598 			 * The first fragment contains the checksum flags
1599 			 * and is optionally followed by extra data for
1600 			 * TSO etc.
1601 			 */
1602 			/**
1603 			 * CSUM_TSO requires checksum offloading.
1604 			 * Some versions of FreeBSD fail to
1605 			 * set CSUM_TCP in the CSUM_TSO case,
1606 			 * so we have to test for CSUM_TSO
1607 			 * explicitly.
1608 			 */
1609 			if (m->m_pkthdr.csum_flags
1610 			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
1611 				tx->flags |= (NETTXF_csum_blank
1612 				    | NETTXF_data_validated);
1613 			}
1614 #if __FreeBSD_version >= 700000
1615 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1616 				struct netif_extra_info *gso =
1617 					(struct netif_extra_info *)
1618 					RING_GET_REQUEST(&sc->tx,
1619 							 ++sc->tx.req_prod_pvt);
1620 
1621 				tx->flags |= NETTXF_extra_info;
1622 
1623 				gso->u.gso.size = m->m_pkthdr.tso_segsz;
1624 				gso->u.gso.type =
1625 					XEN_NETIF_GSO_TYPE_TCPV4;
1626 				gso->u.gso.pad = 0;
1627 				gso->u.gso.features = 0;
1628 
1629 				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
1630 				gso->flags = 0;
1631 			}
1632 #endif
1633 		} else {
1634 			tx->size = m->m_len;
1635 		}
1636 		if (m->m_next)
1637 			tx->flags |= NETTXF_more_data;
1638 
1639 		sc->tx.req_prod_pvt++;
1640 	}
1641 	BPF_MTAP(ifp, m_head);
1642 
1643 	sc->stats.tx_bytes += m_head->m_pkthdr.len;
1644 	sc->stats.tx_packets++;
1645 
1646 	return (0);
1647 }
1648 
1649 static void
1650 xn_start_locked(struct ifnet *ifp)
1651 {
1652 	struct netfront_info *sc;
1653 	struct mbuf *m_head;
1654 	int notify;
1655 
1656 	sc = ifp->if_softc;
1657 
1658 	if (!netfront_carrier_ok(sc))
1659 		return;
1660 
1661 	/*
1662 	 * While we have enough transmit slots available for at least one
1663 	 * maximum-sized packet, pull mbufs off the queue and put them on
1664 	 * the transmit ring.
1665 	 */
1666 	while (xn_tx_slot_available(sc)) {
1667 		IF_DEQUEUE(&ifp->if_snd, m_head);
1668 		if (m_head == NULL)
1669 			break;
1670 
1671 		if (xn_assemble_tx_request(sc, m_head) != 0)
1672 			break;
1673 	}
1674 
1675 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
1676 	if (notify)
1677 		xen_intr_signal(sc->xen_intr_handle);
1678 
1679 	if (RING_FULL(&sc->tx)) {
1680 		sc->tx_full = 1;
1681 #if 0
1682 		netif_stop_queue(dev);
1683 #endif
1684 	}
1685 }
1686 
1687 static void
1688 xn_start(struct ifnet *ifp)
1689 {
1690 	struct netfront_info *sc;
1691 	sc = ifp->if_softc;
1692 	XN_TX_LOCK(sc);
1693 	xn_start_locked(ifp);
1694 	XN_TX_UNLOCK(sc);
1695 }
1696 
1697 /* equivalent of network_open() in Linux */
1698 static void
1699 xn_ifinit_locked(struct netfront_info *sc)
1700 {
1701 	struct ifnet *ifp;
1702 
1703 	XN_LOCK_ASSERT(sc);
1704 
1705 	ifp = sc->xn_ifp;
1706 
1707 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1708 		return;
1709 
1710 	xn_stop(sc);
1711 
1712 	network_alloc_rx_buffers(sc);
1713 	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;
1714 
1715 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1716 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1717 	if_link_state_change(ifp, LINK_STATE_UP);
1718 
1719 	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
1720 }
1721 
1722 static void
1723 xn_ifinit(void *xsc)
1724 {
1725 	struct netfront_info *sc = xsc;
1726 
1727 	XN_LOCK(sc);
1728 	xn_ifinit_locked(sc);
1729 	XN_UNLOCK(sc);
1730 }
1731 
1732 static int
1733 xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1734 {
1735 	struct netfront_info *sc = ifp->if_softc;
1736 	struct ifreq *ifr = (struct ifreq *) data;
1737 #ifdef INET
1738 	struct ifaddr *ifa = (struct ifaddr *)data;
1739 #endif
1740 
1741 	int mask, error = 0;
1742 	switch(cmd) {
1743 	case SIOCSIFADDR:
1744 #ifdef INET
1745 		XN_LOCK(sc);
1746 		if (ifa->ifa_addr->sa_family == AF_INET) {
1747 			ifp->if_flags |= IFF_UP;
1748 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1749 				xn_ifinit_locked(sc);
1750 			arp_ifinit(ifp, ifa);
1751 			XN_UNLOCK(sc);
1752 		} else {
1753 			XN_UNLOCK(sc);
1754 #endif
1755 			error = ether_ioctl(ifp, cmd, data);
1756 #ifdef INET
1757 		}
1758 #endif
1759 		break;
1760 	case SIOCSIFMTU:
1761 		/* XXX can we alter the MTU on a VN? */
1762 #ifdef notyet
1763 		if (ifr->ifr_mtu > XN_JUMBO_MTU)
1764 			error = EINVAL;
1765 		else
1766 #endif
1767 		{
1768 			ifp->if_mtu = ifr->ifr_mtu;
1769 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1770 			xn_ifinit(sc);
1771 		}
1772 		break;
1773 	case SIOCSIFFLAGS:
1774 		XN_LOCK(sc);
1775 		if (ifp->if_flags & IFF_UP) {
1776 			/*
1777 			 * If only the state of the PROMISC flag changed,
1778 			 * then just use the 'set promisc mode' command
1779 			 * instead of reinitializing the entire NIC. Doing
1780 			 * a full re-init means reloading the firmware and
1781 			 * waiting for it to start up, which may take a
1782 			 * second or two.
1783 			 */
1784 #ifdef notyet
1785 			/* No promiscuous mode with Xen */
1786 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1787 			    ifp->if_flags & IFF_PROMISC &&
1788 			    !(sc->xn_if_flags & IFF_PROMISC)) {
1789 				XN_SETBIT(sc, XN_RX_MODE,
1790 					  XN_RXMODE_RX_PROMISC);
1791 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1792 				   !(ifp->if_flags & IFF_PROMISC) &&
1793 				   sc->xn_if_flags & IFF_PROMISC) {
1794 				XN_CLRBIT(sc, XN_RX_MODE,
1795 					  XN_RXMODE_RX_PROMISC);
1796 			} else
1797 #endif
1798 				xn_ifinit_locked(sc);
1799 		} else {
1800 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1801 				xn_stop(sc);
1802 			}
1803 		}
1804 		sc->xn_if_flags = ifp->if_flags;
1805 		XN_UNLOCK(sc);
1806 		error = 0;
1807 		break;
1808 	case SIOCSIFCAP:
1809 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1810 		if (mask & IFCAP_TXCSUM) {
1811 			if (IFCAP_TXCSUM & ifp->if_capenable) {
1812 				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
1813 				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
1814 				    | CSUM_IP | CSUM_TSO);
1815 			} else {
1816 				ifp->if_capenable |= IFCAP_TXCSUM;
1817 				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
1818 				    | CSUM_IP);
1819 			}
1820 		}
1821 		if (mask & IFCAP_RXCSUM) {
1822 			ifp->if_capenable ^= IFCAP_RXCSUM;
1823 		}
1824 #if __FreeBSD_version >= 700000
1825 		if (mask & IFCAP_TSO4) {
1826 			if (IFCAP_TSO4 & ifp->if_capenable) {
1827 				ifp->if_capenable &= ~IFCAP_TSO4;
1828 				ifp->if_hwassist &= ~CSUM_TSO;
1829 			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
1830 				ifp->if_capenable |= IFCAP_TSO4;
1831 				ifp->if_hwassist |= CSUM_TSO;
1832 			} else {
1833 				IPRINTK("Xen requires tx checksum offload"
1834 				    " be enabled to use TSO\n");
1835 				error = EINVAL;
1836 			}
1837 		}
1838 		if (mask & IFCAP_LRO) {
1839 			ifp->if_capenable ^= IFCAP_LRO;
1840 		}
1841 
1842 #endif
1843 		error = 0;
1844 		break;
1845 	case SIOCADDMULTI:
1846 	case SIOCDELMULTI:
1847 #ifdef notyet
1848 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1849 			XN_LOCK(sc);
1850 			xn_setmulti(sc);
1851 			XN_UNLOCK(sc);
1852 			error = 0;
1853 		}
1854 #endif
1855 		/* FALLTHROUGH */
1856 	case SIOCSIFMEDIA:
1857 	case SIOCGIFMEDIA:
1858 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1859 		break;
1860 	default:
1861 		error = ether_ioctl(ifp, cmd, data);
1862 	}
1863 
1864 	return (error);
1865 }
1866 
1867 static void
1868 xn_stop(struct netfront_info *sc)
1869 {
1870 	struct ifnet *ifp;
1871 
1872 	XN_LOCK_ASSERT(sc);
1873 
1874 	ifp = sc->xn_ifp;
1875 
1876 	callout_stop(&sc->xn_stat_ch);
1877 
1878 	xn_free_rx_ring(sc);
1879 	xn_free_tx_ring(sc);
1880 
1881 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1882 	if_link_state_change(ifp, LINK_STATE_DOWN);
1883 }
1884 
1885 /* START of Xenolinux helper functions adapted to FreeBSD */
1886 int
1887 network_connect(struct netfront_info *np)
1888 {
1889 	int i, requeue_idx, error;
1890 	grant_ref_t ref;
1891 	netif_rx_request_t *req;
1892 	u_int feature_rx_copy, feature_rx_flip;
1893 
1894 	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1895 	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
1896 	if (error)
1897 		feature_rx_copy = 0;
1898 	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1899 	    "feature-rx-flip", NULL, "%u", &feature_rx_flip);
1900 	if (error)
1901 		feature_rx_flip = 1;
1902 
1903 	/*
1904 	 * Copy packets on receive path if:
1905 	 *  (a) This was requested by user, and the backend supports it; or
1906 	 *  (b) Flipping was requested, but this is unsupported by the backend.
1907 	 */
1908 	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
1909 				(MODPARM_rx_flip && !feature_rx_flip));
1910 
	/* Recovery procedure: reconnect to the backend and rebuild ring state. */
1912 	error = talk_to_backend(np->xbdev, np);
1913 	if (error)
1914 		return (error);
1915 
1916 	/* Step 1: Reinitialise variables. */
1917 	xn_query_features(np);
1918 	xn_configure_features(np);
1919 	netif_release_tx_bufs(np);
1920 
1921 	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1922 	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1923 		struct mbuf *m;
1924 		u_long pfn;
1925 
1926 		if (np->rx_mbufs[i] == NULL)
1927 			continue;
1928 
1929 		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
1930 		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1931 
1932 		req = RING_GET_REQUEST(&np->rx, requeue_idx);
1933 		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
1934 
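		/*
		 * Re-grant the backend access to each requeued buffer,
		 * by page transfer when flipping or by shared access
		 * when copying.
		 */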
1935 		if (!np->copying_receiver) {
1936 			gnttab_grant_foreign_transfer_ref(ref,
1937 			    xenbus_get_otherend_id(np->xbdev),
1938 			    pfn);
1939 		} else {
1940 			gnttab_grant_foreign_access_ref(ref,
1941 			    xenbus_get_otherend_id(np->xbdev),
1942 			    PFNTOMFN(pfn), 0);
1943 		}
1944 		req->gref = ref;
1945 		req->id   = requeue_idx;
1946 
1947 		requeue_idx++;
1948 	}
1949 
1950 	np->rx.req_prod_pvt = requeue_idx;
1951 
1952 	/* Step 3: All public and private state should now be sane.  Get
1953 	 * ready to start sending and receiving packets and give the driver
1954 	 * domain a kick because we've probably just requeued some
1955 	 * packets.
1956 	 */
1957 	netfront_carrier_on(np);
1958 	xen_intr_signal(np->xen_intr_handle);
1959 	XN_TX_LOCK(np);
1960 	xn_txeof(np);
1961 	XN_TX_UNLOCK(np);
1962 	network_alloc_rx_buffers(np);
1963 
1964 	return (0);
1965 }
1966 
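/*
 * Read the backend's advertised features from the xenstore and adjust
 * the TX fragment limit and interface capabilities (scatter-gather,
 * TSO4 and LRO) to match.
 */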
1967 static void
1968 xn_query_features(struct netfront_info *np)
1969 {
1970 	int val;
1971 
1972 	device_printf(np->xbdev, "backend features:");
1973 
1974 	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1975 		"feature-sg", NULL, "%d", &val) < 0)
1976 		val = 0;
1977 
1978 	np->maxfrags = 1;
1979 	if (val) {
1980 		np->maxfrags = MAX_TX_REQ_FRAGS;
1981 		printf(" feature-sg");
1982 	}
1983 
1984 	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
1985 		"feature-gso-tcpv4", NULL, "%d", &val) < 0)
1986 		val = 0;
1987 
1988 	np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
1989 	if (val) {
1990 		np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
		printf(" feature-gso-tcpv4");
1992 	}
1993 
1994 	printf("\n");
1995 }
1996 
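/*
 * Reset the enabled capabilities to the negotiated set, tearing down
 * and reinitializing the LRO state as required.
 */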
1997 static int
1998 xn_configure_features(struct netfront_info *np)
1999 {
2000 	int err;
2001 
2002 	err = 0;
2003 #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
2004 	if ((np->xn_ifp->if_capenable & IFCAP_LRO) != 0)
2005 		tcp_lro_free(&np->xn_lro);
2006 #endif
	np->xn_ifp->if_capenable =
2008 	    np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4);
2009 	np->xn_ifp->if_hwassist &= ~CSUM_TSO;
2010 #if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
2011 	if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) != 0) {
2012 		err = tcp_lro_init(&np->xn_lro);
2013 		if (err) {
2014 			device_printf(np->xbdev, "LRO initialization failed\n");
2015 		} else {
2016 			np->xn_lro.ifp = np->xn_ifp;
2017 			np->xn_ifp->if_capenable |= IFCAP_LRO;
2018 		}
2019 	}
2020 	if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) != 0) {
2021 		np->xn_ifp->if_capenable |= IFCAP_TSO4;
2022 		np->xn_ifp->if_hwassist |= CSUM_TSO;
2023 	}
2024 #endif
2025 	return (err);
2026 }
2027 
2028 /**
2029  * Create a network device.
2030  * @param dev  Newbus device representing this virtual NIC.
2031  */
2032 int
2033 create_netdev(device_t dev)
2034 {
2035 	int i;
2036 	struct netfront_info *np;
2037 	int err;
2038 	struct ifnet *ifp;
2039 
2040 	np = device_get_softc(dev);
2041 
2042 	np->xbdev         = dev;
2043 
2044 	XN_LOCK_INIT(np, xennetif);
2045 
2046 	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
2047 	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
2048 	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);
2049 
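	/* Initial and bounding targets for RX buffer replenishment. */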
2050 	np->rx_target     = RX_MIN_TARGET;
2051 	np->rx_min_target = RX_MIN_TARGET;
2052 	np->rx_max_target = RX_MAX_TARGET;
2053 
	/*
	 * Initialise tx_mbufs as a free chain in which each free slot
	 * holds the index of the next free slot (cast to a pointer) and
	 * the final slot holds NULL; rx_mbufs entries start out empty.
	 */
2055 	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
2056 		np->tx_mbufs[i] = (void *) ((u_long) i+1);
2057 		np->grant_tx_ref[i] = GRANT_REF_INVALID;
2058 	}
2059 	np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;
2060 
2061 	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
2063 		np->rx_mbufs[i] = NULL;
2064 		np->grant_rx_ref[i] = GRANT_REF_INVALID;
2065 	}
2066 	/* A grant for every tx ring slot */
2067 	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2068 					  &np->gref_tx_head) != 0) {
2069 		IPRINTK("#### netfront can't alloc tx grant refs\n");
2070 		err = ENOMEM;
		goto out;
2072 	}
2073 	/* A grant for every rx ring slot */
2074 	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
2075 					  &np->gref_rx_head) != 0) {
2076 		WPRINTK("#### netfront can't alloc rx grant refs\n");
2078 		err = ENOMEM;
2079 		goto exit;
2080 	}
2081 
	err = xen_net_read_mac(dev, np->mac);
	if (err) {
		/* Free the RX grants here; "exit" frees only the TX grants. */
		gnttab_free_grant_references(np->gref_rx_head);
		goto exit;
	}
2085 
2086 	/* Set up ifnet structure */
2087 	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = np;
	if_initname(ifp, "xn", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xn_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = xn_start;
#ifdef notyet
	ifp->if_watchdog = xn_watchdog;
#endif
	ifp->if_init = xn_ifinit;
	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

	ifp->if_hwassist = XN_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
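	/*
	 * Bound TSO requests to the 64K IP length limit, less Ethernet
	 * framing, and to the fragment count and size that one chain of
	 * TX requests can describe.
	 */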
2102 	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2103 	ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
2104 	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
2105 
	ether_ifattach(ifp, np->mac);
	callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
2108 	netfront_carrier_off(np);
2109 
2110 	return (0);
2111 
2112 exit:
2113 	gnttab_free_grant_references(np->gref_tx_head);
2114 out:
2115 	return (err);
2116 }
2117 
2118 /**
2119  * Handle the change of state of the backend to Closing.  We must delete our
2120  * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
2122  * acknowledgement.
2123  */
2124 #if 0
2125 static void
2126 netfront_closing(device_t dev)
2127 {
2128 #if 0
2129 	struct netfront_info *info = dev->dev_driver_data;
2130 
2131 	DPRINTK("netfront_closing: %s removed\n", dev->nodename);
2132 
2133 	close_netdev(info);
2134 #endif
2135 	xenbus_switch_state(dev, XenbusStateClosed);
2136 }
2137 #endif
2138 
2139 static int
2140 netfront_detach(device_t dev)
2141 {
2142 	struct netfront_info *info = device_get_softc(dev);
2143 
2144 	DPRINTK("%s\n", xenbus_get_node(dev));
2145 
2146 	netif_free(info);
2147 
	return (0);
2149 }
2150 
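/*
 * Release all interface resources: stop the interface, drain the
 * statistics callout, disconnect from the backend, and free the ifnet
 * and media structures.
 */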
2151 static void
2152 netif_free(struct netfront_info *info)
2153 {
2154 	XN_LOCK(info);
2155 	xn_stop(info);
2156 	XN_UNLOCK(info);
2157 	callout_drain(&info->xn_stat_ch);
2158 	netif_disconnect_backend(info);
2159 	if (info->xn_ifp != NULL) {
2160 		ether_ifdetach(info->xn_ifp);
2161 		if_free(info->xn_ifp);
2162 		info->xn_ifp = NULL;
2163 	}
2164 	ifmedia_removeall(&info->sc_media);
2165 }
2166 
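/*
 * Sever the connection to the backend: mark the carrier off under both
 * ring locks, release the shared TX and RX rings, and unbind the event
 * channel.
 */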
2167 static void
2168 netif_disconnect_backend(struct netfront_info *info)
2169 {
2170 	XN_RX_LOCK(info);
2171 	XN_TX_LOCK(info);
2172 	netfront_carrier_off(info);
2173 	XN_TX_UNLOCK(info);
2174 	XN_RX_UNLOCK(info);
2175 
2176 	free_ring(&info->tx_ring_ref, &info->tx.sring);
2177 	free_ring(&info->rx_ring_ref, &info->rx.sring);
2178 
2179 	xen_intr_unbind(&info->xen_intr_handle);
2180 }
2181 
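/*
 * Revoke the backend's access to a shared ring and clear the stored
 * grant reference and ring pointer.
 */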
2182 static void
2183 free_ring(int *ref, void *ring_ptr_ref)
2184 {
2185 	void **ring_ptr_ptr = ring_ptr_ref;
2186 
2187 	if (*ref != GRANT_REF_INVALID) {
2188 		/* This API frees the associated storage. */
2189 		gnttab_end_foreign_access(*ref, *ring_ptr_ptr);
2190 		*ref = GRANT_REF_INVALID;
2191 	}
2192 	*ring_ptr_ptr = NULL;
2193 }
2194 
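/*
 * Media is fixed for this virtual interface: media change requests are
 * accepted without action, and the link is always reported as active.
 */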
2195 static int
2196 xn_ifmedia_upd(struct ifnet *ifp)
2197 {
2198 	return (0);
2199 }
2200 
2201 static void
2202 xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2203 {
2204 	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
2205 	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2206 }
2207 
2208 /* ** Driver registration ** */
2209 static device_method_t netfront_methods[] = {
2210 	/* Device interface */
2211 	DEVMETHOD(device_probe,         netfront_probe),
2212 	DEVMETHOD(device_attach,        netfront_attach),
2213 	DEVMETHOD(device_detach,        netfront_detach),
2214 	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
2215 	DEVMETHOD(device_suspend,       netfront_suspend),
2216 	DEVMETHOD(device_resume,        netfront_resume),
2217 
2218 	/* Xenbus interface */
2219 	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),
2220 
2221 	DEVMETHOD_END
2222 };
2223 
2224 static driver_t netfront_driver = {
2225 	"xn",
2226 	netfront_methods,
2227 	sizeof(struct netfront_info),
2228 };
2229 devclass_t netfront_devclass;
2230 
DRIVER_MODULE(netfront, xenbusb_front, netfront_driver, netfront_devclass, NULL,
    NULL);
2233