xref: /freebsd/sys/dev/xen/netfront/netfront.c (revision cf9c09e1f38c9878a2414a5262c65ba1fe4caf79)
18e0ad55aSJoel Dahl /*-
289e0f4d2SKip Macy  * Copyright (c) 2004-2006 Kip Macy
389e0f4d2SKip Macy  * All rights reserved.
489e0f4d2SKip Macy  *
58e0ad55aSJoel Dahl  * Redistribution and use in source and binary forms, with or without
68e0ad55aSJoel Dahl  * modification, are permitted provided that the following conditions
78e0ad55aSJoel Dahl  * are met:
88e0ad55aSJoel Dahl  * 1. Redistributions of source code must retain the above copyright
98e0ad55aSJoel Dahl  *    notice, this list of conditions and the following disclaimer.
108e0ad55aSJoel Dahl  * 2. Redistributions in binary form must reproduce the above copyright
118e0ad55aSJoel Dahl  *    notice, this list of conditions and the following disclaimer in the
128e0ad55aSJoel Dahl  *    documentation and/or other materials provided with the distribution.
1389e0f4d2SKip Macy  *
148e0ad55aSJoel Dahl  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
158e0ad55aSJoel Dahl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
168e0ad55aSJoel Dahl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
178e0ad55aSJoel Dahl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
188e0ad55aSJoel Dahl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
198e0ad55aSJoel Dahl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
208e0ad55aSJoel Dahl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
218e0ad55aSJoel Dahl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
228e0ad55aSJoel Dahl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
238e0ad55aSJoel Dahl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
248e0ad55aSJoel Dahl  * SUCH DAMAGE.
2589e0f4d2SKip Macy  */
2689e0f4d2SKip Macy 
2789e0f4d2SKip Macy 
2889e0f4d2SKip Macy #include <sys/cdefs.h>
2989e0f4d2SKip Macy __FBSDID("$FreeBSD$");
3089e0f4d2SKip Macy 
31a0ae8f04SBjoern A. Zeeb #include "opt_inet.h"
32a0ae8f04SBjoern A. Zeeb 
3389e0f4d2SKip Macy #include <sys/param.h>
3489e0f4d2SKip Macy #include <sys/systm.h>
3589e0f4d2SKip Macy #include <sys/sockio.h>
3689e0f4d2SKip Macy #include <sys/mbuf.h>
3789e0f4d2SKip Macy #include <sys/malloc.h>
3823dc5621SKip Macy #include <sys/module.h>
3989e0f4d2SKip Macy #include <sys/kernel.h>
4089e0f4d2SKip Macy #include <sys/socket.h>
4112678024SDoug Rabson #include <sys/sysctl.h>
4289e0f4d2SKip Macy #include <sys/queue.h>
438cb07992SAdrian Chadd #include <sys/lock.h>
4489e0f4d2SKip Macy #include <sys/sx.h>
4589e0f4d2SKip Macy 
4689e0f4d2SKip Macy #include <net/if.h>
4789e0f4d2SKip Macy #include <net/if_arp.h>
4889e0f4d2SKip Macy #include <net/ethernet.h>
4989e0f4d2SKip Macy #include <net/if_dl.h>
5089e0f4d2SKip Macy #include <net/if_media.h>
5189e0f4d2SKip Macy 
5289e0f4d2SKip Macy #include <net/bpf.h>
5389e0f4d2SKip Macy 
5489e0f4d2SKip Macy #include <net/if_types.h>
5589e0f4d2SKip Macy #include <net/if.h>
5689e0f4d2SKip Macy 
5789e0f4d2SKip Macy #include <netinet/in_systm.h>
5889e0f4d2SKip Macy #include <netinet/in.h>
5989e0f4d2SKip Macy #include <netinet/ip.h>
6089e0f4d2SKip Macy #include <netinet/if_ether.h>
6112678024SDoug Rabson #if __FreeBSD_version >= 700000
6212678024SDoug Rabson #include <netinet/tcp.h>
6312678024SDoug Rabson #include <netinet/tcp_lro.h>
6412678024SDoug Rabson #endif
6589e0f4d2SKip Macy 
6689e0f4d2SKip Macy #include <vm/vm.h>
6789e0f4d2SKip Macy #include <vm/pmap.h>
6889e0f4d2SKip Macy 
6989e0f4d2SKip Macy #include <machine/clock.h>      /* for DELAY */
7089e0f4d2SKip Macy #include <machine/bus.h>
7189e0f4d2SKip Macy #include <machine/resource.h>
7289e0f4d2SKip Macy #include <machine/frame.h>
73980c7178SKip Macy #include <machine/vmparam.h>
7489e0f4d2SKip Macy 
7589e0f4d2SKip Macy #include <sys/bus.h>
7689e0f4d2SKip Macy #include <sys/rman.h>
7789e0f4d2SKip Macy 
7889e0f4d2SKip Macy #include <machine/intr_machdep.h>
7989e0f4d2SKip Macy 
8089e0f4d2SKip Macy #include <machine/xen/xen-os.h>
8112678024SDoug Rabson #include <machine/xen/xenfunc.h>
822913e88cSRobert Watson #include <machine/xen/xenvar.h>
833a6d1fcfSKip Macy #include <xen/hypervisor.h>
843a6d1fcfSKip Macy #include <xen/xen_intr.h>
853a6d1fcfSKip Macy #include <xen/evtchn.h>
8689e0f4d2SKip Macy #include <xen/gnttab.h>
8789e0f4d2SKip Macy #include <xen/interface/memory.h>
8889e0f4d2SKip Macy #include <xen/interface/io/netif.h>
8923dc5621SKip Macy #include <xen/xenbus/xenbusvar.h>
9089e0f4d2SKip Macy 
9112678024SDoug Rabson #include <dev/xen/netfront/mbufq.h>
9212678024SDoug Rabson 
9323dc5621SKip Macy #include "xenbus_if.h"
9489e0f4d2SKip Macy 
95931eeffaSKenneth D. Merry #define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP | CSUM_TSO)
9612678024SDoug Rabson 
9789e0f4d2SKip Macy #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
9889e0f4d2SKip Macy #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
9989e0f4d2SKip Macy 
10012678024SDoug Rabson #if __FreeBSD_version >= 700000
10112678024SDoug Rabson /*
10212678024SDoug Rabson  * Should the driver do LRO on the RX end?
10312678024SDoug Rabson  *  This can be toggled on the fly, but the
10412678024SDoug Rabson  *  interface must be reset (down/up) for it
10512678024SDoug Rabson  *  to take effect.
10612678024SDoug Rabson  */
10712678024SDoug Rabson static int xn_enable_lro = 1;
10812678024SDoug Rabson TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
10912678024SDoug Rabson #else
11012678024SDoug Rabson 
11112678024SDoug Rabson #define IFCAP_TSO4	0
11212678024SDoug Rabson #define CSUM_TSO	0
11312678024SDoug Rabson 
11412678024SDoug Rabson #endif
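
/*
 * Illustrative example: because the LRO knob above is registered with
 * TUNABLE_INT(), it can be preset from the loader, e.g. in
 * /boot/loader.conf:
 *
 *	hw.xn.enable_lro="0"
 *
 * It is also exposed as a read/write sysctl per device by the
 * SYSCTL_ADD_INT() call in netfront_attach() below; as the comment above
 * notes, the interface must be cycled down/up for a change to take effect.
 */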
11512678024SDoug Rabson 
11689e0f4d2SKip Macy #ifdef CONFIG_XEN
11789e0f4d2SKip Macy static int MODPARM_rx_copy = 0;
11889e0f4d2SKip Macy module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
11989e0f4d2SKip Macy MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
12089e0f4d2SKip Macy static int MODPARM_rx_flip = 0;
12189e0f4d2SKip Macy module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
12289e0f4d2SKip Macy MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
12389e0f4d2SKip Macy #else
12489e0f4d2SKip Macy static const int MODPARM_rx_copy = 1;
12589e0f4d2SKip Macy static const int MODPARM_rx_flip = 0;
12689e0f4d2SKip Macy #endif
12789e0f4d2SKip Macy 
128931eeffaSKenneth D. Merry /**
129931eeffaSKenneth D. Merry  * \brief The maximum number of data fragments allowed in a single
130931eeffaSKenneth D. Merry  *        transmit request.
131931eeffaSKenneth D. Merry  *
132931eeffaSKenneth D. Merry  * This limit is imposed by the backend driver.  We assume here that
133931eeffaSKenneth D. Merry  * we are dealing with a Linux driver domain and have set our limit
134931eeffaSKenneth D. Merry  * to mirror the Linux MAX_SKB_FRAGS constant.
135931eeffaSKenneth D. Merry  */
136931eeffaSKenneth D. Merry #define	MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
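
/*
 * Worked example (illustrative): with 4 KB pages this evaluates to
 * 65536 / 4096 + 2 = 18 fragments, i.e. enough slots for a 64 KB TSO
 * payload plus two extra pages for an unaligned start and end, matching
 * the historical Linux MAX_SKB_FRAGS value of 18.
 */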
137931eeffaSKenneth D. Merry 
13889e0f4d2SKip Macy #define RX_COPY_THRESHOLD 256
13989e0f4d2SKip Macy 
14089e0f4d2SKip Macy #define net_ratelimit() 0
14189e0f4d2SKip Macy 
14289e0f4d2SKip Macy struct netfront_info;
14389e0f4d2SKip Macy struct netfront_rx_info;
14489e0f4d2SKip Macy 
14589e0f4d2SKip Macy static void xn_txeof(struct netfront_info *);
14689e0f4d2SKip Macy static void xn_rxeof(struct netfront_info *);
14789e0f4d2SKip Macy static void network_alloc_rx_buffers(struct netfront_info *);
14889e0f4d2SKip Macy 
14989e0f4d2SKip Macy static void xn_tick_locked(struct netfront_info *);
15089e0f4d2SKip Macy static void xn_tick(void *);
15189e0f4d2SKip Macy 
15289e0f4d2SKip Macy static void xn_intr(void *);
153931eeffaSKenneth D. Merry static inline int xn_count_frags(struct mbuf *m);
154931eeffaSKenneth D. Merry static int  xn_assemble_tx_request(struct netfront_info *sc,
155931eeffaSKenneth D. Merry 				   struct mbuf *m_head);
15689e0f4d2SKip Macy static void xn_start_locked(struct ifnet *);
15789e0f4d2SKip Macy static void xn_start(struct ifnet *);
15889e0f4d2SKip Macy static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
15989e0f4d2SKip Macy static void xn_ifinit_locked(struct netfront_info *);
16089e0f4d2SKip Macy static void xn_ifinit(void *);
16189e0f4d2SKip Macy static void xn_stop(struct netfront_info *);
162*cf9c09e1SJustin T. Gibbs static int xn_configure_lro(struct netfront_info *np);
16389e0f4d2SKip Macy #ifdef notyet
16489e0f4d2SKip Macy static void xn_watchdog(struct ifnet *);
16589e0f4d2SKip Macy #endif
16689e0f4d2SKip Macy 
16789e0f4d2SKip Macy static void show_device(struct netfront_info *sc);
16889e0f4d2SKip Macy #ifdef notyet
16923dc5621SKip Macy static void netfront_closing(device_t dev);
17089e0f4d2SKip Macy #endif
17189e0f4d2SKip Macy static void netif_free(struct netfront_info *info);
17223dc5621SKip Macy static int netfront_detach(device_t dev);
17389e0f4d2SKip Macy 
17423dc5621SKip Macy static int talk_to_backend(device_t dev, struct netfront_info *info);
17523dc5621SKip Macy static int create_netdev(device_t dev);
17689e0f4d2SKip Macy static void netif_disconnect_backend(struct netfront_info *info);
17723dc5621SKip Macy static int setup_device(device_t dev, struct netfront_info *info);
178*cf9c09e1SJustin T. Gibbs static void free_ring(int *ref, void *ring_ptr_ref);
17989e0f4d2SKip Macy 
1800e509842SJustin T. Gibbs static int  xn_ifmedia_upd(struct ifnet *ifp);
1810e509842SJustin T. Gibbs static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
1820e509842SJustin T. Gibbs 
18389e0f4d2SKip Macy /* Xenolinux helper functions */
18423dc5621SKip Macy int network_connect(struct netfront_info *);
18589e0f4d2SKip Macy 
18689e0f4d2SKip Macy static void xn_free_rx_ring(struct netfront_info *);
18789e0f4d2SKip Macy 
18889e0f4d2SKip Macy static void xn_free_tx_ring(struct netfront_info *);
18989e0f4d2SKip Macy 
19089e0f4d2SKip Macy static int xennet_get_responses(struct netfront_info *np,
191931eeffaSKenneth D. Merry 	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
192931eeffaSKenneth D. Merry 	struct mbuf **list, int *pages_flipped_p);
19389e0f4d2SKip Macy 
19489e0f4d2SKip Macy #define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
19589e0f4d2SKip Macy 
19689e0f4d2SKip Macy #define INVALID_P2M_ENTRY (~0UL)
19789e0f4d2SKip Macy 
19889e0f4d2SKip Macy /*
19989e0f4d2SKip Macy  * Mbuf pointers. We need these to keep track of the virtual addresses
20089e0f4d2SKip Macy  * of our mbuf chains since we can only convert from virtual to physical,
20189e0f4d2SKip Macy  * not the other way around.  The size must track the free index arrays.
20289e0f4d2SKip Macy  */
20389e0f4d2SKip Macy struct xn_chain_data {
20489e0f4d2SKip Macy 	struct mbuf    *xn_tx_chain[NET_TX_RING_SIZE+1];
205a4ec37f5SAdrian Chadd 	int		xn_tx_chain_cnt;
20689e0f4d2SKip Macy 	struct mbuf    *xn_rx_chain[NET_RX_RING_SIZE+1];
20789e0f4d2SKip Macy };
20889e0f4d2SKip Macy 
209931eeffaSKenneth D. Merry #define NUM_ELEMENTS(x) (sizeof(x)/sizeof(*x))
21089e0f4d2SKip Macy 
21189e0f4d2SKip Macy struct net_device_stats
21289e0f4d2SKip Macy {
21389e0f4d2SKip Macy 	u_long	rx_packets;		/* total packets received	*/
21489e0f4d2SKip Macy 	u_long	tx_packets;		/* total packets transmitted	*/
21589e0f4d2SKip Macy 	u_long	rx_bytes;		/* total bytes received 	*/
21689e0f4d2SKip Macy 	u_long	tx_bytes;		/* total bytes transmitted	*/
21789e0f4d2SKip Macy 	u_long	rx_errors;		/* bad packets received		*/
21889e0f4d2SKip Macy 	u_long	tx_errors;		/* packet transmit problems	*/
21989e0f4d2SKip Macy 	u_long	rx_dropped;		/* no space in linux buffers	*/
22089e0f4d2SKip Macy 	u_long	tx_dropped;		/* no space available in linux	*/
22189e0f4d2SKip Macy 	u_long	multicast;		/* multicast packets received	*/
22289e0f4d2SKip Macy 	u_long	collisions;
22389e0f4d2SKip Macy 
22489e0f4d2SKip Macy 	/* detailed rx_errors: */
22589e0f4d2SKip Macy 	u_long	rx_length_errors;
22689e0f4d2SKip Macy 	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
22789e0f4d2SKip Macy 	u_long	rx_crc_errors;		/* recved pkt with crc error	*/
22889e0f4d2SKip Macy 	u_long	rx_frame_errors;	/* recv'd frame alignment error */
22989e0f4d2SKip Macy 	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
23089e0f4d2SKip Macy 	u_long	rx_missed_errors;	/* receiver missed packet	*/
23189e0f4d2SKip Macy 
23289e0f4d2SKip Macy 	/* detailed tx_errors */
23389e0f4d2SKip Macy 	u_long	tx_aborted_errors;
23489e0f4d2SKip Macy 	u_long	tx_carrier_errors;
23589e0f4d2SKip Macy 	u_long	tx_fifo_errors;
23689e0f4d2SKip Macy 	u_long	tx_heartbeat_errors;
23789e0f4d2SKip Macy 	u_long	tx_window_errors;
23889e0f4d2SKip Macy 
23989e0f4d2SKip Macy 	/* for cslip etc */
24089e0f4d2SKip Macy 	u_long	rx_compressed;
24189e0f4d2SKip Macy 	u_long	tx_compressed;
24289e0f4d2SKip Macy };
24389e0f4d2SKip Macy 
24489e0f4d2SKip Macy struct netfront_info {
24589e0f4d2SKip Macy 
24689e0f4d2SKip Macy 	struct ifnet *xn_ifp;
24712678024SDoug Rabson #if __FreeBSD_version >= 700000
24812678024SDoug Rabson 	struct lro_ctrl xn_lro;
24912678024SDoug Rabson #endif
25089e0f4d2SKip Macy 
25189e0f4d2SKip Macy 	struct net_device_stats stats;
25289e0f4d2SKip Macy 	u_int tx_full;
25389e0f4d2SKip Macy 
25489e0f4d2SKip Macy 	netif_tx_front_ring_t tx;
25589e0f4d2SKip Macy 	netif_rx_front_ring_t rx;
25689e0f4d2SKip Macy 
25789e0f4d2SKip Macy 	struct mtx   tx_lock;
25889e0f4d2SKip Macy 	struct mtx   rx_lock;
259227ca257SKip Macy 	struct mtx   sc_lock;
26089e0f4d2SKip Macy 
26189e0f4d2SKip Macy 	u_int handle;
26289e0f4d2SKip Macy 	u_int irq;
26389e0f4d2SKip Macy 	u_int copying_receiver;
26489e0f4d2SKip Macy 	u_int carrier;
26589e0f4d2SKip Macy 
26689e0f4d2SKip Macy 	/* Receive-ring batched refills. */
26789e0f4d2SKip Macy #define RX_MIN_TARGET 32
26889e0f4d2SKip Macy #define RX_MAX_TARGET NET_RX_RING_SIZE
2690e509842SJustin T. Gibbs 	int rx_min_target;
2700e509842SJustin T. Gibbs 	int rx_max_target;
2710e509842SJustin T. Gibbs 	int rx_target;
27289e0f4d2SKip Macy 
27389e0f4d2SKip Macy 	grant_ref_t gref_tx_head;
27489e0f4d2SKip Macy 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
27589e0f4d2SKip Macy 	grant_ref_t gref_rx_head;
27689e0f4d2SKip Macy 	grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
27789e0f4d2SKip Macy 
27823dc5621SKip Macy 	device_t		xbdev;
27989e0f4d2SKip Macy 	int			tx_ring_ref;
28089e0f4d2SKip Macy 	int			rx_ring_ref;
28189e0f4d2SKip Macy 	uint8_t			mac[ETHER_ADDR_LEN];
28289e0f4d2SKip Macy 	struct xn_chain_data	xn_cdata;	/* mbufs */
28389e0f4d2SKip Macy 	struct mbuf_head	xn_rx_batch;	/* head of the batch queue */
28489e0f4d2SKip Macy 
28589e0f4d2SKip Macy 	int			xn_if_flags;
28689e0f4d2SKip Macy 	struct callout	        xn_stat_ch;
28789e0f4d2SKip Macy 
28889e0f4d2SKip Macy 	u_long			rx_pfn_array[NET_RX_RING_SIZE];
28989e0f4d2SKip Macy 	multicall_entry_t	rx_mcl[NET_RX_RING_SIZE+1];
29089e0f4d2SKip Macy 	mmu_update_t		rx_mmu[NET_RX_RING_SIZE];
2910e509842SJustin T. Gibbs 	struct ifmedia		sc_media;
29289e0f4d2SKip Macy };
29389e0f4d2SKip Macy 
29489e0f4d2SKip Macy #define rx_mbufs xn_cdata.xn_rx_chain
29589e0f4d2SKip Macy #define tx_mbufs xn_cdata.xn_tx_chain
29689e0f4d2SKip Macy 
29789e0f4d2SKip Macy #define XN_LOCK_INIT(_sc, _name) \
29889e0f4d2SKip Macy         mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
29989e0f4d2SKip Macy         mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
300227ca257SKip Macy         mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF)
30189e0f4d2SKip Macy 
30289e0f4d2SKip Macy #define XN_RX_LOCK(_sc)           mtx_lock(&(_sc)->rx_lock)
30389e0f4d2SKip Macy #define XN_RX_UNLOCK(_sc)         mtx_unlock(&(_sc)->rx_lock)
30489e0f4d2SKip Macy 
30589e0f4d2SKip Macy #define XN_TX_LOCK(_sc)           mtx_lock(&(_sc)->tx_lock)
30689e0f4d2SKip Macy #define XN_TX_UNLOCK(_sc)         mtx_unlock(&(_sc)->tx_lock)
30789e0f4d2SKip Macy 
308227ca257SKip Macy #define XN_LOCK(_sc)           mtx_lock(&(_sc)->sc_lock);
309227ca257SKip Macy #define XN_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_lock);
31089e0f4d2SKip Macy 
311227ca257SKip Macy #define XN_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->sc_lock, MA_OWNED);
31289e0f4d2SKip Macy #define XN_RX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->rx_lock, MA_OWNED);
31389e0f4d2SKip Macy #define XN_TX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->tx_lock, MA_OWNED);
31489e0f4d2SKip Macy #define XN_LOCK_DESTROY(_sc)   mtx_destroy(&(_sc)->rx_lock); \
31589e0f4d2SKip Macy                                mtx_destroy(&(_sc)->tx_lock); \
316227ca257SKip Macy                                mtx_destroy(&(_sc)->sc_lock);
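
/*
 * Illustrative usage of the locking macros above, mirroring what
 * xn_intr() does further down; the softc lock (XN_LOCK) serializes the
 * init/ioctl paths:
 *
 *	XN_TX_LOCK(sc);
 *	xn_txeof(sc);
 *	XN_TX_UNLOCK(sc);
 *
 *	XN_RX_LOCK(sc);
 *	xn_rxeof(sc);
 *	XN_RX_UNLOCK(sc);
 */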
31789e0f4d2SKip Macy 
31889e0f4d2SKip Macy struct netfront_rx_info {
31989e0f4d2SKip Macy 	struct netif_rx_response rx;
32089e0f4d2SKip Macy 	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
32189e0f4d2SKip Macy };
32289e0f4d2SKip Macy 
32389e0f4d2SKip Macy #define netfront_carrier_on(netif)	((netif)->carrier = 1)
32489e0f4d2SKip Macy #define netfront_carrier_off(netif)	((netif)->carrier = 0)
32589e0f4d2SKip Macy #define netfront_carrier_ok(netif)	((netif)->carrier)
32689e0f4d2SKip Macy 
32789e0f4d2SKip Macy /* Access macros for acquiring and freeing slots in xn_free_{tx,rx}_idxs[]. */
32889e0f4d2SKip Macy 
32989e0f4d2SKip Macy 
33089e0f4d2SKip Macy 
33189e0f4d2SKip Macy /*
33289e0f4d2SKip Macy  * Access macros for acquiring and freeing slots in tx_mbufs[].
33389e0f4d2SKip Macy  */
33489e0f4d2SKip Macy 
33589e0f4d2SKip Macy static inline void
336931eeffaSKenneth D. Merry add_id_to_freelist(struct mbuf **list, uintptr_t id)
33789e0f4d2SKip Macy {
338931eeffaSKenneth D. Merry 	KASSERT(id != 0,
339931eeffaSKenneth D. Merry 		("%s: the head item (0) must always be free.", __func__));
34089e0f4d2SKip Macy 	list[id] = list[0];
341931eeffaSKenneth D. Merry 	list[0]  = (struct mbuf *)id;
34289e0f4d2SKip Macy }
34389e0f4d2SKip Macy 
34489e0f4d2SKip Macy static inline unsigned short
34589e0f4d2SKip Macy get_id_from_freelist(struct mbuf **list)
34689e0f4d2SKip Macy {
347931eeffaSKenneth D. Merry 	uintptr_t id;
348931eeffaSKenneth D. Merry 
349931eeffaSKenneth D. Merry 	id = (uintptr_t)list[0];
350931eeffaSKenneth D. Merry 	KASSERT(id != 0,
351931eeffaSKenneth D. Merry 		("%s: the head item (0) must always remain free.", __func__));
35289e0f4d2SKip Macy 	list[0] = list[id];
35389e0f4d2SKip Macy 	return (id);
35489e0f4d2SKip Macy }
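
/*
 * Illustrative sketch of the intended use of the free-list helpers
 * above, as done by the transmit path: entry 0 is the free-list head
 * and never holds an mbuf, so valid ids are 1..NET_TX_RING_SIZE.
 *
 *	id = get_id_from_freelist(sc->tx_mbufs);
 *	sc->tx_mbufs[id] = m;
 *	...
 *	sc->tx_mbufs[id] = NULL;
 *	add_id_to_freelist(sc->tx_mbufs, id);
 */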
35589e0f4d2SKip Macy 
35689e0f4d2SKip Macy static inline int
35789e0f4d2SKip Macy xennet_rxidx(RING_IDX idx)
35889e0f4d2SKip Macy {
35989e0f4d2SKip Macy 	return idx & (NET_RX_RING_SIZE - 1);
36089e0f4d2SKip Macy }
36189e0f4d2SKip Macy 
36289e0f4d2SKip Macy static inline struct mbuf *
363931eeffaSKenneth D. Merry xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
36489e0f4d2SKip Macy {
36589e0f4d2SKip Macy 	int i = xennet_rxidx(ri);
36689e0f4d2SKip Macy 	struct mbuf *m;
36789e0f4d2SKip Macy 
36889e0f4d2SKip Macy 	m = np->rx_mbufs[i];
36989e0f4d2SKip Macy 	np->rx_mbufs[i] = NULL;
37089e0f4d2SKip Macy 	return (m);
37189e0f4d2SKip Macy }
37289e0f4d2SKip Macy 
37389e0f4d2SKip Macy static inline grant_ref_t
37489e0f4d2SKip Macy xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
37589e0f4d2SKip Macy {
37689e0f4d2SKip Macy 	int i = xennet_rxidx(ri);
37789e0f4d2SKip Macy 	grant_ref_t ref = np->grant_rx_ref[i];
378ff662b5cSJustin T. Gibbs 	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
379ff662b5cSJustin T. Gibbs 	np->grant_rx_ref[i] = GRANT_REF_INVALID;
38089e0f4d2SKip Macy 	return ref;
38189e0f4d2SKip Macy }
38289e0f4d2SKip Macy 
38389e0f4d2SKip Macy #define IPRINTK(fmt, args...) \
38489e0f4d2SKip Macy     printf("[XEN] " fmt, ##args)
385227ca257SKip Macy #ifdef INVARIANTS
38689e0f4d2SKip Macy #define WPRINTK(fmt, args...) \
38789e0f4d2SKip Macy     printf("[XEN] " fmt, ##args)
388227ca257SKip Macy #else
389227ca257SKip Macy #define WPRINTK(fmt, args...)
390227ca257SKip Macy #endif
391227ca257SKip Macy #ifdef DEBUG
39289e0f4d2SKip Macy #define DPRINTK(fmt, args...) \
39323dc5621SKip Macy     printf("[XEN] %s: " fmt, __func__, ##args)
39412678024SDoug Rabson #else
39512678024SDoug Rabson #define DPRINTK(fmt, args...)
39612678024SDoug Rabson #endif
39789e0f4d2SKip Macy 
39889e0f4d2SKip Macy /**
39989e0f4d2SKip Macy  * Read the 'mac' node at the given device's node in the store, and parse that
40089e0f4d2SKip Macy  * as colon-separated octets, placing result the given mac array.  mac must be
40089e0f4d2SKip Macy  * as colon-separated octets, placing the result in the given mac array.  mac
40189e0f4d2SKip Macy  * must be a preallocated array of length ETHER_ADDR_LEN.
40389e0f4d2SKip Macy  */
40489e0f4d2SKip Macy static int
40523dc5621SKip Macy xen_net_read_mac(device_t dev, uint8_t mac[])
40689e0f4d2SKip Macy {
4073a6d1fcfSKip Macy 	int error, i;
4083a6d1fcfSKip Macy 	char *s, *e, *macstr;
4093a6d1fcfSKip Macy 
410ff662b5cSJustin T. Gibbs 	error = xs_read(XST_NIL, xenbus_get_node(dev), "mac", NULL,
4113a6d1fcfSKip Macy 	    (void **) &macstr);
4123a6d1fcfSKip Macy 	if (error)
4133a6d1fcfSKip Macy 		return (error);
4143a6d1fcfSKip Macy 
41589e0f4d2SKip Macy 	s = macstr;
41689e0f4d2SKip Macy 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
41789e0f4d2SKip Macy 		mac[i] = strtoul(s, &e, 16);
41889e0f4d2SKip Macy 		if (s == e || (e[0] != ':' && e[0] != 0)) {
419ff662b5cSJustin T. Gibbs 			free(macstr, M_XENBUS);
4203a6d1fcfSKip Macy 			return (ENOENT);
42189e0f4d2SKip Macy 		}
42289e0f4d2SKip Macy 		s = &e[1];
42389e0f4d2SKip Macy 	}
424ff662b5cSJustin T. Gibbs 	free(macstr, M_XENBUS);
4253a6d1fcfSKip Macy 	return (0);
42689e0f4d2SKip Macy }
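
/*
 * Example of the xenstore node parsed above (values illustrative).  The
 * frontend's "mac" entry holds six colon-separated hexadecimal octets:
 *
 *	# xenstore-read /local/domain/<domid>/device/vif/0/mac
 *	00:16:3e:12:34:56
 *
 * (00:16:3e is the OUI conventionally used for Xen guest interfaces.)
 */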
42789e0f4d2SKip Macy 
42889e0f4d2SKip Macy /**
42989e0f4d2SKip Macy  * Entry point to this code when a new device is created.  Allocate the basic
43089e0f4d2SKip Macy  * structures and the ring buffers for communication with the backend, and
43189e0f4d2SKip Macy  * inform the backend of the appropriate details for those.  Switch to
43289e0f4d2SKip Macy  * Connected state.
43389e0f4d2SKip Macy  */
43489e0f4d2SKip Macy static int
43523dc5621SKip Macy netfront_probe(device_t dev)
43623dc5621SKip Macy {
43723dc5621SKip Macy 
43823dc5621SKip Macy 	if (!strcmp(xenbus_get_type(dev), "vif")) {
43923dc5621SKip Macy 		device_set_desc(dev, "Virtual Network Interface");
44023dc5621SKip Macy 		return (0);
44123dc5621SKip Macy 	}
44223dc5621SKip Macy 
44323dc5621SKip Macy 	return (ENXIO);
44423dc5621SKip Macy }
44523dc5621SKip Macy 
44623dc5621SKip Macy static int
44723dc5621SKip Macy netfront_attach(device_t dev)
44889e0f4d2SKip Macy {
44989e0f4d2SKip Macy 	int err;
45089e0f4d2SKip Macy 
45123dc5621SKip Macy 	err = create_netdev(dev);
45289e0f4d2SKip Macy 	if (err) {
45389e0f4d2SKip Macy 		xenbus_dev_fatal(dev, err, "creating netdev");
45489e0f4d2SKip Macy 		return err;
45589e0f4d2SKip Macy 	}
45689e0f4d2SKip Macy 
45712678024SDoug Rabson #if __FreeBSD_version >= 700000
45812678024SDoug Rabson 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
45912678024SDoug Rabson 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
46012678024SDoug Rabson 	    OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
46112678024SDoug Rabson 	    &xn_enable_lro, 0, "Large Receive Offload");
46212678024SDoug Rabson #endif
46312678024SDoug Rabson 
46489e0f4d2SKip Macy 	return 0;
46589e0f4d2SKip Macy }
46689e0f4d2SKip Macy 
467*cf9c09e1SJustin T. Gibbs static int
468*cf9c09e1SJustin T. Gibbs netfront_suspend(device_t dev)
469*cf9c09e1SJustin T. Gibbs {
470*cf9c09e1SJustin T. Gibbs 	struct netfront_info *info = device_get_softc(dev);
471*cf9c09e1SJustin T. Gibbs 
472*cf9c09e1SJustin T. Gibbs 	XN_RX_LOCK(info);
473*cf9c09e1SJustin T. Gibbs 	XN_TX_LOCK(info);
474*cf9c09e1SJustin T. Gibbs 	netfront_carrier_off(info);
475*cf9c09e1SJustin T. Gibbs 	XN_TX_UNLOCK(info);
476*cf9c09e1SJustin T. Gibbs 	XN_RX_UNLOCK(info);
477*cf9c09e1SJustin T. Gibbs 	return (0);
478*cf9c09e1SJustin T. Gibbs }
47989e0f4d2SKip Macy 
48089e0f4d2SKip Macy /**
48189e0f4d2SKip Macy  * We are reconnecting to the backend, due to a suspend/resume, or a backend
48289e0f4d2SKip Macy  * driver restart.  We tear down our netif structure and recreate it, but
48389e0f4d2SKip Macy  * leave the device-layer structures intact so that this is transparent to the
48489e0f4d2SKip Macy  * rest of the kernel.
48589e0f4d2SKip Macy  */
48689e0f4d2SKip Macy static int
48723dc5621SKip Macy netfront_resume(device_t dev)
48889e0f4d2SKip Macy {
48923dc5621SKip Macy 	struct netfront_info *info = device_get_softc(dev);
49089e0f4d2SKip Macy 
49189e0f4d2SKip Macy 	netif_disconnect_backend(info);
49289e0f4d2SKip Macy 	return (0);
49389e0f4d2SKip Macy }
49489e0f4d2SKip Macy 
49589e0f4d2SKip Macy 
49689e0f4d2SKip Macy /* Common code used when first setting up, and when resuming. */
49789e0f4d2SKip Macy static int
49823dc5621SKip Macy talk_to_backend(device_t dev, struct netfront_info *info)
49989e0f4d2SKip Macy {
50089e0f4d2SKip Macy 	const char *message;
501ff662b5cSJustin T. Gibbs 	struct xs_transaction xst;
50223dc5621SKip Macy 	const char *node = xenbus_get_node(dev);
50389e0f4d2SKip Macy 	int err;
50489e0f4d2SKip Macy 
50589e0f4d2SKip Macy 	err = xen_net_read_mac(dev, info->mac);
50689e0f4d2SKip Macy 	if (err) {
50723dc5621SKip Macy 		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
50889e0f4d2SKip Macy 		goto out;
50989e0f4d2SKip Macy 	}
51089e0f4d2SKip Macy 
51189e0f4d2SKip Macy 	/* Create shared ring, alloc event channel. */
51289e0f4d2SKip Macy 	err = setup_device(dev, info);
51389e0f4d2SKip Macy 	if (err)
51489e0f4d2SKip Macy 		goto out;
51589e0f4d2SKip Macy 
51689e0f4d2SKip Macy  again:
517ff662b5cSJustin T. Gibbs 	err = xs_transaction_start(&xst);
51889e0f4d2SKip Macy 	if (err) {
51989e0f4d2SKip Macy 		xenbus_dev_fatal(dev, err, "starting transaction");
52089e0f4d2SKip Macy 		goto destroy_ring;
52189e0f4d2SKip Macy 	}
522ff662b5cSJustin T. Gibbs 	err = xs_printf(xst, node, "tx-ring-ref","%u",
52389e0f4d2SKip Macy 			info->tx_ring_ref);
52489e0f4d2SKip Macy 	if (err) {
52589e0f4d2SKip Macy 		message = "writing tx ring-ref";
52689e0f4d2SKip Macy 		goto abort_transaction;
52789e0f4d2SKip Macy 	}
528ff662b5cSJustin T. Gibbs 	err = xs_printf(xst, node, "rx-ring-ref","%u",
52989e0f4d2SKip Macy 			info->rx_ring_ref);
53089e0f4d2SKip Macy 	if (err) {
53189e0f4d2SKip Macy 		message = "writing rx ring-ref";
53289e0f4d2SKip Macy 		goto abort_transaction;
53389e0f4d2SKip Macy 	}
534ff662b5cSJustin T. Gibbs 	err = xs_printf(xst, node,
53589e0f4d2SKip Macy 			"event-channel", "%u", irq_to_evtchn_port(info->irq));
53689e0f4d2SKip Macy 	if (err) {
53789e0f4d2SKip Macy 		message = "writing event-channel";
53889e0f4d2SKip Macy 		goto abort_transaction;
53989e0f4d2SKip Macy 	}
540ff662b5cSJustin T. Gibbs 	err = xs_printf(xst, node, "request-rx-copy", "%u",
54189e0f4d2SKip Macy 			info->copying_receiver);
54289e0f4d2SKip Macy 	if (err) {
54389e0f4d2SKip Macy 		message = "writing request-rx-copy";
54489e0f4d2SKip Macy 		goto abort_transaction;
54589e0f4d2SKip Macy 	}
546ff662b5cSJustin T. Gibbs 	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
54789e0f4d2SKip Macy 	if (err) {
54889e0f4d2SKip Macy 		message = "writing feature-rx-notify";
54989e0f4d2SKip Macy 		goto abort_transaction;
55089e0f4d2SKip Macy 	}
551ff662b5cSJustin T. Gibbs 	err = xs_printf(xst, node, "feature-sg", "%d", 1);
55289e0f4d2SKip Macy 	if (err) {
55389e0f4d2SKip Macy 		message = "writing feature-sg";
55489e0f4d2SKip Macy 		goto abort_transaction;
55589e0f4d2SKip Macy 	}
55612678024SDoug Rabson #if __FreeBSD_version >= 700000
557ff662b5cSJustin T. Gibbs 	err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
55889e0f4d2SKip Macy 	if (err) {
55989e0f4d2SKip Macy 		message = "writing feature-gso-tcpv4";
56089e0f4d2SKip Macy 		goto abort_transaction;
56189e0f4d2SKip Macy 	}
56289e0f4d2SKip Macy #endif
56389e0f4d2SKip Macy 
564ff662b5cSJustin T. Gibbs 	err = xs_transaction_end(xst, 0);
56589e0f4d2SKip Macy 	if (err) {
56689e0f4d2SKip Macy 		if (err == EAGAIN)
56789e0f4d2SKip Macy 			goto again;
56889e0f4d2SKip Macy 		xenbus_dev_fatal(dev, err, "completing transaction");
56989e0f4d2SKip Macy 		goto destroy_ring;
57089e0f4d2SKip Macy 	}
57189e0f4d2SKip Macy 
57289e0f4d2SKip Macy 	return 0;
57389e0f4d2SKip Macy 
57489e0f4d2SKip Macy  abort_transaction:
575ff662b5cSJustin T. Gibbs 	xs_transaction_end(xst, 1);
57689e0f4d2SKip Macy 	xenbus_dev_fatal(dev, err, "%s", message);
57789e0f4d2SKip Macy  destroy_ring:
57889e0f4d2SKip Macy 	netif_free(info);
57989e0f4d2SKip Macy  out:
58089e0f4d2SKip Macy 	return err;
58189e0f4d2SKip Macy }
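
/*
 * After the transaction above commits, the frontend's xenbus directory
 * contains entries along these lines (values illustrative):
 *
 *	tx-ring-ref = "768"
 *	rx-ring-ref = "769"
 *	event-channel = "9"
 *	request-rx-copy = "1"
 *	feature-rx-notify = "1"
 *	feature-sg = "1"
 *	feature-gso-tcpv4 = "1"
 */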
58289e0f4d2SKip Macy 
58389e0f4d2SKip Macy 
58489e0f4d2SKip Macy static int
58523dc5621SKip Macy setup_device(device_t dev, struct netfront_info *info)
58689e0f4d2SKip Macy {
58789e0f4d2SKip Macy 	netif_tx_sring_t *txs;
58889e0f4d2SKip Macy 	netif_rx_sring_t *rxs;
5893a6d1fcfSKip Macy 	int error;
59089e0f4d2SKip Macy 	struct ifnet *ifp;
59189e0f4d2SKip Macy 
59289e0f4d2SKip Macy 	ifp = info->xn_ifp;
59389e0f4d2SKip Macy 
594ff662b5cSJustin T. Gibbs 	info->tx_ring_ref = GRANT_REF_INVALID;
595ff662b5cSJustin T. Gibbs 	info->rx_ring_ref = GRANT_REF_INVALID;
59689e0f4d2SKip Macy 	info->rx.sring = NULL;
59789e0f4d2SKip Macy 	info->tx.sring = NULL;
59889e0f4d2SKip Macy 	info->irq = 0;
59989e0f4d2SKip Macy 
60089e0f4d2SKip Macy 	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
60189e0f4d2SKip Macy 	if (!txs) {
6023a6d1fcfSKip Macy 		error = ENOMEM;
6033a6d1fcfSKip Macy 		xenbus_dev_fatal(dev, error, "allocating tx ring page");
60489e0f4d2SKip Macy 		goto fail;
60589e0f4d2SKip Macy 	}
60689e0f4d2SKip Macy 	SHARED_RING_INIT(txs);
60789e0f4d2SKip Macy 	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
6083a6d1fcfSKip Macy 	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
6093a6d1fcfSKip Macy 	if (error)
61089e0f4d2SKip Macy 		goto fail;
61189e0f4d2SKip Macy 
61289e0f4d2SKip Macy 	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
61389e0f4d2SKip Macy 	if (!rxs) {
6143a6d1fcfSKip Macy 		error = ENOMEM;
6153a6d1fcfSKip Macy 		xenbus_dev_fatal(dev, error, "allocating rx ring page");
61689e0f4d2SKip Macy 		goto fail;
61789e0f4d2SKip Macy 	}
61889e0f4d2SKip Macy 	SHARED_RING_INIT(rxs);
61989e0f4d2SKip Macy 	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
62089e0f4d2SKip Macy 
6213a6d1fcfSKip Macy 	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
6223a6d1fcfSKip Macy 	if (error)
62389e0f4d2SKip Macy 		goto fail;
62489e0f4d2SKip Macy 
6253a6d1fcfSKip Macy 	error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev),
6263a6d1fcfSKip Macy 	    "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq);
62789e0f4d2SKip Macy 
6283a6d1fcfSKip Macy 	if (error) {
6293a6d1fcfSKip Macy 		xenbus_dev_fatal(dev, error,
63089e0f4d2SKip Macy 				 "bind_evtchn_to_irqhandler failed");
63189e0f4d2SKip Macy 		goto fail;
63289e0f4d2SKip Macy 	}
63389e0f4d2SKip Macy 
63489e0f4d2SKip Macy 	show_device(info);
63589e0f4d2SKip Macy 
6363a6d1fcfSKip Macy 	return (0);
63789e0f4d2SKip Macy 
63889e0f4d2SKip Macy  fail:
63989e0f4d2SKip Macy 	netif_free(info);
6403a6d1fcfSKip Macy 	return (error);
64189e0f4d2SKip Macy }
64289e0f4d2SKip Macy 
643a0ae8f04SBjoern A. Zeeb #ifdef INET
64489e0f4d2SKip Macy /**
64512678024SDoug Rabson  * If this interface has an IPv4 address, send an ARP for it. This
64612678024SDoug Rabson  * helps to get the network going again after migrating hosts.
64712678024SDoug Rabson  */
64812678024SDoug Rabson static void
64912678024SDoug Rabson netfront_send_fake_arp(device_t dev, struct netfront_info *info)
65012678024SDoug Rabson {
65112678024SDoug Rabson 	struct ifnet *ifp;
65212678024SDoug Rabson 	struct ifaddr *ifa;
65312678024SDoug Rabson 
65412678024SDoug Rabson 	ifp = info->xn_ifp;
65512678024SDoug Rabson 	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
65612678024SDoug Rabson 		if (ifa->ifa_addr->sa_family == AF_INET) {
65712678024SDoug Rabson 			arp_ifinit(ifp, ifa);
65812678024SDoug Rabson 		}
65912678024SDoug Rabson 	}
66012678024SDoug Rabson }
661a0ae8f04SBjoern A. Zeeb #endif
66212678024SDoug Rabson 
66312678024SDoug Rabson /**
66489e0f4d2SKip Macy  * Callback received when the backend's state changes.
66589e0f4d2SKip Macy  */
666283d6f72SJustin T. Gibbs static void
66723dc5621SKip Macy netfront_backend_changed(device_t dev, XenbusState newstate)
66889e0f4d2SKip Macy {
66923dc5621SKip Macy 	struct netfront_info *sc = device_get_softc(dev);
67089e0f4d2SKip Macy 
67123dc5621SKip Macy 	DPRINTK("newstate=%d\n", newstate);
67289e0f4d2SKip Macy 
67323dc5621SKip Macy 	switch (newstate) {
67489e0f4d2SKip Macy 	case XenbusStateInitialising:
67589e0f4d2SKip Macy 	case XenbusStateInitialised:
67689e0f4d2SKip Macy 	case XenbusStateConnected:
67789e0f4d2SKip Macy 	case XenbusStateUnknown:
67889e0f4d2SKip Macy 	case XenbusStateClosed:
679920ba15bSKip Macy 	case XenbusStateReconfigured:
680920ba15bSKip Macy 	case XenbusStateReconfiguring:
68189e0f4d2SKip Macy 		break;
68289e0f4d2SKip Macy 	case XenbusStateInitWait:
68323dc5621SKip Macy 		if (xenbus_get_state(dev) != XenbusStateInitialising)
68489e0f4d2SKip Macy 			break;
68523dc5621SKip Macy 		if (network_connect(sc) != 0)
68689e0f4d2SKip Macy 			break;
68723dc5621SKip Macy 		xenbus_set_state(dev, XenbusStateConnected);
688a0ae8f04SBjoern A. Zeeb #ifdef INET
68912678024SDoug Rabson 		netfront_send_fake_arp(dev, sc);
690a0ae8f04SBjoern A. Zeeb #endif
69123dc5621SKip Macy 		break;
69289e0f4d2SKip Macy 	case XenbusStateClosing:
69323dc5621SKip Macy 		xenbus_set_state(dev, XenbusStateClosed);
69489e0f4d2SKip Macy 		break;
69589e0f4d2SKip Macy 	}
69689e0f4d2SKip Macy }
69789e0f4d2SKip Macy 
69889e0f4d2SKip Macy static void
69989e0f4d2SKip Macy xn_free_rx_ring(struct netfront_info *sc)
70089e0f4d2SKip Macy {
70189e0f4d2SKip Macy #if 0
70289e0f4d2SKip Macy 	int i;
70389e0f4d2SKip Macy 
70489e0f4d2SKip Macy 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
705931eeffaSKenneth D. Merry 		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
706931eeffaSKenneth D. Merry 			m_freem(sc->rx_mbufs[i]);
707931eeffaSKenneth D. Merry 			sc->rx_mbufs[i] = NULL;
70889e0f4d2SKip Macy 		}
70989e0f4d2SKip Macy 	}
71089e0f4d2SKip Macy 
71189e0f4d2SKip Macy 	sc->rx.rsp_cons = 0;
71289e0f4d2SKip Macy 	sc->xn_rx_if->req_prod = 0;
71389e0f4d2SKip Macy 	sc->xn_rx_if->event = sc->rx.rsp_cons ;
71489e0f4d2SKip Macy #endif
71589e0f4d2SKip Macy }
71689e0f4d2SKip Macy 
71789e0f4d2SKip Macy static void
71889e0f4d2SKip Macy xn_free_tx_ring(struct netfront_info *sc)
71989e0f4d2SKip Macy {
72089e0f4d2SKip Macy #if 0
72189e0f4d2SKip Macy 	int i;
72289e0f4d2SKip Macy 
72389e0f4d2SKip Macy 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
724931eeffaSKenneth D. Merry 		if (sc->tx_mbufs[i] != NULL) {
725931eeffaSKenneth D. Merry 			m_freem(sc->tx_mbufs[i]);
72689e0f4d2SKip Macy 			sc->xn_cdata.xn_tx_chain[i] = NULL;
72789e0f4d2SKip Macy 		}
72889e0f4d2SKip Macy 	}
72989e0f4d2SKip Macy 
73089e0f4d2SKip Macy 	return;
73189e0f4d2SKip Macy #endif
73289e0f4d2SKip Macy }
73389e0f4d2SKip Macy 
734931eeffaSKenneth D. Merry /**
735931eeffaSKenneth D. Merry  * \brief Verify that there is sufficient space in the Tx ring
736931eeffaSKenneth D. Merry  *        buffer for a maximally sized request to be enqueued.
737c099cafaSAdrian Chadd  *
738931eeffaSKenneth D. Merry  * A transmit request requires a transmit descriptor for each packet
739931eeffaSKenneth D. Merry  * fragment, plus up to 2 entries for "options" (e.g. TSO).
740c099cafaSAdrian Chadd  */
74189e0f4d2SKip Macy static inline int
742931eeffaSKenneth D. Merry xn_tx_slot_available(struct netfront_info *np)
74389e0f4d2SKip Macy {
744931eeffaSKenneth D. Merry 	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
74589e0f4d2SKip Macy }
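
/*
 * Worked example (illustrative): with 4 KB pages MAX_TX_REQ_FRAGS is
 * 65536 / 4096 + 2 = 18, so the test above requires strictly more than
 * 20 free ring entries (room for up to 18 fragment descriptors plus 2
 * option entries, e.g. a TSO extra-info slot) before another packet is
 * queued.
 */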
746931eeffaSKenneth D. Merry 
74789e0f4d2SKip Macy static void
74889e0f4d2SKip Macy netif_release_tx_bufs(struct netfront_info *np)
74989e0f4d2SKip Macy {
75089e0f4d2SKip Macy 	int i;
75189e0f4d2SKip Macy 
75289e0f4d2SKip Macy 	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
753931eeffaSKenneth D. Merry 		struct mbuf *m;
75489e0f4d2SKip Macy 
755931eeffaSKenneth D. Merry 		m = np->tx_mbufs[i];
756931eeffaSKenneth D. Merry 
757931eeffaSKenneth D. Merry 		/*
758931eeffaSKenneth D. Merry 		 * We assume that no kernel addresses are
759931eeffaSKenneth D. Merry 		 * less than NET_TX_RING_SIZE.  Any entry
760931eeffaSKenneth D. Merry 		 * in the table that is below this number
761931eeffaSKenneth D. Merry 		 * must be an index from free-list tracking.
762931eeffaSKenneth D. Merry 		 */
763931eeffaSKenneth D. Merry 		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
76489e0f4d2SKip Macy 			continue;
765*cf9c09e1SJustin T. Gibbs 		gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
76689e0f4d2SKip Macy 		gnttab_release_grant_reference(&np->gref_tx_head,
76789e0f4d2SKip Macy 		    np->grant_tx_ref[i]);
768ff662b5cSJustin T. Gibbs 		np->grant_tx_ref[i] = GRANT_REF_INVALID;
76989e0f4d2SKip Macy 		add_id_to_freelist(np->tx_mbufs, i);
770a4ec37f5SAdrian Chadd 		np->xn_cdata.xn_tx_chain_cnt--;
771a4ec37f5SAdrian Chadd 		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
772a4ec37f5SAdrian Chadd 			panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0");
773a4ec37f5SAdrian Chadd 		}
774*cf9c09e1SJustin T. Gibbs 		m_free(m);
77589e0f4d2SKip Macy 	}
77689e0f4d2SKip Macy }
77789e0f4d2SKip Macy 
77889e0f4d2SKip Macy static void
77989e0f4d2SKip Macy network_alloc_rx_buffers(struct netfront_info *sc)
78089e0f4d2SKip Macy {
78123dc5621SKip Macy 	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
78289e0f4d2SKip Macy 	unsigned short id;
78389e0f4d2SKip Macy 	struct mbuf *m_new;
78489e0f4d2SKip Macy 	int i, batch_target, notify;
78589e0f4d2SKip Macy 	RING_IDX req_prod;
78689e0f4d2SKip Macy 	struct xen_memory_reservation reservation;
78789e0f4d2SKip Macy 	grant_ref_t ref;
78889e0f4d2SKip Macy 	int nr_flips;
78989e0f4d2SKip Macy 	netif_rx_request_t *req;
79089e0f4d2SKip Macy 	vm_offset_t vaddr;
79189e0f4d2SKip Macy 	u_long pfn;
79289e0f4d2SKip Macy 
79389e0f4d2SKip Macy 	req_prod = sc->rx.req_prod_pvt;
79489e0f4d2SKip Macy 
79589e0f4d2SKip Macy 	if (unlikely(sc->carrier == 0))
79689e0f4d2SKip Macy 		return;
79789e0f4d2SKip Macy 
79889e0f4d2SKip Macy 	/*
799931eeffaSKenneth D. Merry 	 * Allocate mbufs greedily, even though we batch updates to the
80089e0f4d2SKip Macy 	 * receive ring. This creates a less bursty demand on the memory
801931eeffaSKenneth D. Merry 	 * allocator, and so should reduce the chance of failed allocation
80289e0f4d2SKip Macy 	 * requests both for ourself and for other kernel subsystems.
803931eeffaSKenneth D. Merry 	 *
804931eeffaSKenneth D. Merry 	 * Here we attempt to maintain rx_target buffers in flight, counting
805931eeffaSKenneth D. Merry 	 * buffers that we have yet to process in the receive ring.
80689e0f4d2SKip Macy 	 */
80789e0f4d2SKip Macy 	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
80889e0f4d2SKip Macy 	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
80989e0f4d2SKip Macy 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
810931eeffaSKenneth D. Merry 		if (m_new == NULL) {
811931eeffaSKenneth D. Merry 			printf("%s: MGETHDR failed\n", __func__);
81289e0f4d2SKip Macy 			goto no_mbuf;
813931eeffaSKenneth D. Merry 		}
81489e0f4d2SKip Macy 
81589e0f4d2SKip Macy 		m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
81689e0f4d2SKip Macy 		if ((m_new->m_flags & M_EXT) == 0) {
817931eeffaSKenneth D. Merry 			printf("%s: m_cljget failed\n", __func__);
81889e0f4d2SKip Macy 			m_freem(m_new);
81989e0f4d2SKip Macy 
82089e0f4d2SKip Macy no_mbuf:
82189e0f4d2SKip Macy 			if (i != 0)
82289e0f4d2SKip Macy 				goto refill;
82389e0f4d2SKip Macy 			/*
82489e0f4d2SKip Macy 			 * XXX set timer
82589e0f4d2SKip Macy 			 */
82689e0f4d2SKip Macy 			break;
82789e0f4d2SKip Macy 		}
82889e0f4d2SKip Macy 		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;
82989e0f4d2SKip Macy 
83089e0f4d2SKip Macy 		/* queue the mbufs allocated */
83189e0f4d2SKip Macy 		mbufq_tail(&sc->xn_rx_batch, m_new);
83289e0f4d2SKip Macy 	}
83389e0f4d2SKip Macy 
834931eeffaSKenneth D. Merry 	/*
835931eeffaSKenneth D. Merry 	 * If we've allocated at least half of our target number of entries,
836931eeffaSKenneth D. Merry 	 * submit them to the backend - we have enough to make the overhead
837931eeffaSKenneth D. Merry 	 * of submission worthwhile.  Otherwise wait for more mbufs and
838931eeffaSKenneth D. Merry 	 * request entries to become available.
839931eeffaSKenneth D. Merry 	 */
84089e0f4d2SKip Macy 	if (i < (sc->rx_target/2)) {
84189e0f4d2SKip Macy 		if (req_prod > sc->rx.sring->req_prod)
84289e0f4d2SKip Macy 			goto push;
84389e0f4d2SKip Macy 		return;
84489e0f4d2SKip Macy 	}
845931eeffaSKenneth D. Merry 
846931eeffaSKenneth D. Merry 	/*
847931eeffaSKenneth D. Merry 	 * Double floating fill target if we risked having the backend
848931eeffaSKenneth D. Merry 	 * run out of empty buffers for receive traffic.  We define "running
849931eeffaSKenneth D. Merry 	 * low" as having less than a fourth of our target buffers free
850931eeffaSKenneth D. Merry 	 * at the time we refilled the queue.
851931eeffaSKenneth D. Merry 	 */
852931eeffaSKenneth D. Merry 	if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
853931eeffaSKenneth D. Merry 		sc->rx_target *= 2;
854931eeffaSKenneth D. Merry 		if (sc->rx_target > sc->rx_max_target)
85589e0f4d2SKip Macy 			sc->rx_target = sc->rx_max_target;
856931eeffaSKenneth D. Merry 	}
85789e0f4d2SKip Macy 
85889e0f4d2SKip Macy refill:
85989e0f4d2SKip Macy 	for (nr_flips = i = 0; ; i++) {
86089e0f4d2SKip Macy 		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
86189e0f4d2SKip Macy 			break;
86289e0f4d2SKip Macy 
86389e0f4d2SKip Macy 		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
86489e0f4d2SKip Macy 				vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);
86589e0f4d2SKip Macy 
86689e0f4d2SKip Macy 		id = xennet_rxidx(req_prod + i);
86789e0f4d2SKip Macy 
868931eeffaSKenneth D. Merry 		KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
869931eeffaSKenneth D. Merry 		sc->rx_mbufs[id] = m_new;
87089e0f4d2SKip Macy 
87189e0f4d2SKip Macy 		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
872ff662b5cSJustin T. Gibbs 		KASSERT(ref != GNTTAB_LIST_END,
873ff662b5cSJustin T. Gibbs 			("reserved grant references exhausted"));
87489e0f4d2SKip Macy 		sc->grant_rx_ref[id] = ref;
87589e0f4d2SKip Macy 
87689e0f4d2SKip Macy 		vaddr = mtod(m_new, vm_offset_t);
87789e0f4d2SKip Macy 		pfn = vtophys(vaddr) >> PAGE_SHIFT;
87889e0f4d2SKip Macy 		req = RING_GET_REQUEST(&sc->rx, req_prod + i);
87989e0f4d2SKip Macy 
88089e0f4d2SKip Macy 		if (sc->copying_receiver == 0) {
88189e0f4d2SKip Macy 			gnttab_grant_foreign_transfer_ref(ref,
88223dc5621SKip Macy 			    otherend_id, pfn);
88389e0f4d2SKip Macy 			sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
88489e0f4d2SKip Macy 			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
88589e0f4d2SKip Macy 				/* Remove this page before passing
88689e0f4d2SKip Macy 				 * back to Xen.
88789e0f4d2SKip Macy 				 */
88889e0f4d2SKip Macy 				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
88989e0f4d2SKip Macy 				MULTI_update_va_mapping(&sc->rx_mcl[i],
89089e0f4d2SKip Macy 				    vaddr, 0, 0);
89189e0f4d2SKip Macy 			}
89289e0f4d2SKip Macy 			nr_flips++;
89389e0f4d2SKip Macy 		} else {
89489e0f4d2SKip Macy 			gnttab_grant_foreign_access_ref(ref,
89523dc5621SKip Macy 			    otherend_id,
89689e0f4d2SKip Macy 			    PFNTOMFN(pfn), 0);
89789e0f4d2SKip Macy 		}
89889e0f4d2SKip Macy 		req->id = id;
89989e0f4d2SKip Macy 		req->gref = ref;
90089e0f4d2SKip Macy 
90189e0f4d2SKip Macy 		sc->rx_pfn_array[i] =
90289e0f4d2SKip Macy 		    vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT;
90389e0f4d2SKip Macy 	}
90489e0f4d2SKip Macy 
90589e0f4d2SKip Macy 	KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
90689e0f4d2SKip Macy 	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
90789e0f4d2SKip Macy 	/*
90889e0f4d2SKip Macy 	 * We may have allocated buffers which have entries outstanding
90989e0f4d2SKip Macy 	 * in the page update queue -- make sure we flush those first!
91089e0f4d2SKip Macy 	 */
91189e0f4d2SKip Macy 	PT_UPDATES_FLUSH();
91289e0f4d2SKip Macy 	if (nr_flips != 0) {
91389e0f4d2SKip Macy #ifdef notyet
91489e0f4d2SKip Macy 		/* Tell the balloon driver what is going on. */
91589e0f4d2SKip Macy 		balloon_update_driver_allowance(i);
91689e0f4d2SKip Macy #endif
917920ba15bSKip Macy 		set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
91889e0f4d2SKip Macy 		reservation.nr_extents   = i;
91989e0f4d2SKip Macy 		reservation.extent_order = 0;
92089e0f4d2SKip Macy 		reservation.address_bits = 0;
92189e0f4d2SKip Macy 		reservation.domid        = DOMID_SELF;
92289e0f4d2SKip Macy 
92389e0f4d2SKip Macy 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
92489e0f4d2SKip Macy 
92589e0f4d2SKip Macy 			/* After all PTEs have been zapped, flush the TLB. */
92689e0f4d2SKip Macy 			sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
92789e0f4d2SKip Macy 			    UVMF_TLB_FLUSH|UVMF_ALL;
92889e0f4d2SKip Macy 
92989e0f4d2SKip Macy 			/* Give away a batch of pages. */
93089e0f4d2SKip Macy 			sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
93189e0f4d2SKip Macy 			sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
93289e0f4d2SKip Macy 			sc->rx_mcl[i].args[1] =  (u_long)&reservation;
93389e0f4d2SKip Macy 			/* Zap PTEs and give away pages in one big multicall. */
93489e0f4d2SKip Macy 			(void)HYPERVISOR_multicall(sc->rx_mcl, i+1);
93589e0f4d2SKip Macy 
93689e0f4d2SKip Macy 			/* Check return status of HYPERVISOR_dom_mem_op(). */
93789e0f4d2SKip Macy 			if (unlikely(sc->rx_mcl[i].result != i))
93889e0f4d2SKip Macy 				panic("Unable to reduce memory reservation\n");
93989e0f4d2SKip Macy 		} else {
94089e0f4d2SKip Macy 			if (HYPERVISOR_memory_op(
94189e0f4d2SKip Macy 			    XENMEM_decrease_reservation, &reservation)
94289e0f4d2SKip Macy 			    != i)
94389e0f4d2SKip Macy 				panic("Unable to reduce memory "
94489e0f4d2SKip Macy 				    "reservation\n");
94589e0f4d2SKip Macy 		}
94689e0f4d2SKip Macy 	} else {
94789e0f4d2SKip Macy 		wmb();
94889e0f4d2SKip Macy 	}
94989e0f4d2SKip Macy 
95089e0f4d2SKip Macy 	/* Above is a suitable barrier to ensure backend will see requests. */
95189e0f4d2SKip Macy 	sc->rx.req_prod_pvt = req_prod + i;
95289e0f4d2SKip Macy push:
95389e0f4d2SKip Macy 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
95489e0f4d2SKip Macy 	if (notify)
95589e0f4d2SKip Macy 		notify_remote_via_irq(sc->irq);
95689e0f4d2SKip Macy }
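
/*
 * Sizing note (illustrative): with 4 KB pages NET_RX_RING_SIZE works out
 * to 256, so rx_target floats between RX_MIN_TARGET (32) and
 * RX_MAX_TARGET (256), doubling whenever fewer than a quarter of the
 * target count of buffers were still outstanding at refill time, as
 * implemented above.
 */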
95789e0f4d2SKip Macy 
95889e0f4d2SKip Macy static void
95989e0f4d2SKip Macy xn_rxeof(struct netfront_info *np)
96089e0f4d2SKip Macy {
96189e0f4d2SKip Macy 	struct ifnet *ifp;
96212678024SDoug Rabson #if __FreeBSD_version >= 700000
96312678024SDoug Rabson 	struct lro_ctrl *lro = &np->xn_lro;
96412678024SDoug Rabson 	struct lro_entry *queued;
96512678024SDoug Rabson #endif
96689e0f4d2SKip Macy 	struct netfront_rx_info rinfo;
96789e0f4d2SKip Macy 	struct netif_rx_response *rx = &rinfo.rx;
96889e0f4d2SKip Macy 	struct netif_extra_info *extras = rinfo.extras;
96989e0f4d2SKip Macy 	RING_IDX i, rp;
97089e0f4d2SKip Macy 	multicall_entry_t *mcl;
97189e0f4d2SKip Macy 	struct mbuf *m;
97283b92f6eSKip Macy 	struct mbuf_head rxq, errq;
97349906218SDoug Rabson 	int err, pages_flipped = 0, work_to_do;
97489e0f4d2SKip Macy 
97549906218SDoug Rabson 	do {
97689e0f4d2SKip Macy 		XN_RX_LOCK_ASSERT(np);
97789e0f4d2SKip Macy 		if (!netfront_carrier_ok(np))
97889e0f4d2SKip Macy 			return;
97989e0f4d2SKip Macy 
98089e0f4d2SKip Macy 		mbufq_init(&errq);
98189e0f4d2SKip Macy 		mbufq_init(&rxq);
98289e0f4d2SKip Macy 
98389e0f4d2SKip Macy 		ifp = np->xn_ifp;
98489e0f4d2SKip Macy 
98589e0f4d2SKip Macy 		rp = np->rx.sring->rsp_prod;
98689e0f4d2SKip Macy 		rmb();	/* Ensure we see queued responses up to 'rp'. */
98789e0f4d2SKip Macy 
98889e0f4d2SKip Macy 		i = np->rx.rsp_cons;
98989e0f4d2SKip Macy 		while ((i != rp)) {
99089e0f4d2SKip Macy 			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
99189e0f4d2SKip Macy 			memset(extras, 0, sizeof(rinfo.extras));
99289e0f4d2SKip Macy 
99383b92f6eSKip Macy 			m = NULL;
994931eeffaSKenneth D. Merry 			err = xennet_get_responses(np, &rinfo, rp, &i, &m,
99589e0f4d2SKip Macy 			    &pages_flipped);
99689e0f4d2SKip Macy 
99789e0f4d2SKip Macy 			if (unlikely(err)) {
99883b92f6eSKip Macy 				if (m)
99989e0f4d2SKip Macy 					mbufq_tail(&errq, m);
100089e0f4d2SKip Macy 				np->stats.rx_errors++;
100189e0f4d2SKip Macy 				continue;
100289e0f4d2SKip Macy 			}
100389e0f4d2SKip Macy 
100489e0f4d2SKip Macy 			m->m_pkthdr.rcvif = ifp;
100589e0f4d2SKip Macy 			if ( rx->flags & NETRXF_data_validated ) {
100689e0f4d2SKip Macy 				/* Tell the stack the checksums are okay */
100789e0f4d2SKip Macy 				/*
100889e0f4d2SKip Macy 				 * XXX this isn't necessarily the case - need to add
100989e0f4d2SKip Macy 				 * check
101089e0f4d2SKip Macy 				 */
101189e0f4d2SKip Macy 
101289e0f4d2SKip Macy 				m->m_pkthdr.csum_flags |=
101389e0f4d2SKip Macy 					(CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
101489e0f4d2SKip Macy 					    | CSUM_PSEUDO_HDR);
101589e0f4d2SKip Macy 				m->m_pkthdr.csum_data = 0xffff;
101689e0f4d2SKip Macy 			}
101789e0f4d2SKip Macy 
101889e0f4d2SKip Macy 			np->stats.rx_packets++;
101983b92f6eSKip Macy 			np->stats.rx_bytes += m->m_pkthdr.len;
102089e0f4d2SKip Macy 
102189e0f4d2SKip Macy 			mbufq_tail(&rxq, m);
1022931eeffaSKenneth D. Merry 			np->rx.rsp_cons = i;
102389e0f4d2SKip Macy 		}
102489e0f4d2SKip Macy 
102589e0f4d2SKip Macy 		if (pages_flipped) {
102689e0f4d2SKip Macy 			/* Some pages are no longer absent... */
102789e0f4d2SKip Macy #ifdef notyet
102889e0f4d2SKip Macy 			balloon_update_driver_allowance(-pages_flipped);
102989e0f4d2SKip Macy #endif
103089e0f4d2SKip Macy 			/* Do all the remapping work, and M->P updates, in one big
103189e0f4d2SKip Macy 			 * hypercall.
103289e0f4d2SKip Macy 			 */
103389e0f4d2SKip Macy 			if (!!xen_feature(XENFEAT_auto_translated_physmap)) {
103489e0f4d2SKip Macy 				mcl = np->rx_mcl + pages_flipped;
103589e0f4d2SKip Macy 				mcl->op = __HYPERVISOR_mmu_update;
103689e0f4d2SKip Macy 				mcl->args[0] = (u_long)np->rx_mmu;
103789e0f4d2SKip Macy 				mcl->args[1] = pages_flipped;
103889e0f4d2SKip Macy 				mcl->args[2] = 0;
103989e0f4d2SKip Macy 				mcl->args[3] = DOMID_SELF;
104089e0f4d2SKip Macy 				(void)HYPERVISOR_multicall(np->rx_mcl,
104189e0f4d2SKip Macy 				    pages_flipped + 1);
104289e0f4d2SKip Macy 			}
104389e0f4d2SKip Macy 		}
104489e0f4d2SKip Macy 
104589e0f4d2SKip Macy 		while ((m = mbufq_dequeue(&errq)))
104689e0f4d2SKip Macy 			m_freem(m);
104789e0f4d2SKip Macy 
104889e0f4d2SKip Macy 		/*
104989e0f4d2SKip Macy 		 * Process all the mbufs after the remapping is complete.
105089e0f4d2SKip Macy 		 * Break the mbuf chain first though.
105189e0f4d2SKip Macy 		 */
105289e0f4d2SKip Macy 		while ((m = mbufq_dequeue(&rxq)) != NULL) {
105389e0f4d2SKip Macy 			ifp->if_ipackets++;
105489e0f4d2SKip Macy 
105589e0f4d2SKip Macy 			/*
105689e0f4d2SKip Macy 			 * Do we really need to drop the rx lock?
105789e0f4d2SKip Macy 			 */
105889e0f4d2SKip Macy 			XN_RX_UNLOCK(np);
105912678024SDoug Rabson #if __FreeBSD_version >= 700000
106012678024SDoug Rabson 			/* Use LRO if possible */
106112678024SDoug Rabson 			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
106212678024SDoug Rabson 			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
106312678024SDoug Rabson 				/*
106412678024SDoug Rabson 				 * If LRO fails, pass up to the stack
106512678024SDoug Rabson 				 * directly.
106612678024SDoug Rabson 				 */
106789e0f4d2SKip Macy 				(*ifp->if_input)(ifp, m);
106812678024SDoug Rabson 			}
106912678024SDoug Rabson #else
107012678024SDoug Rabson 			(*ifp->if_input)(ifp, m);
107112678024SDoug Rabson #endif
107289e0f4d2SKip Macy 			XN_RX_LOCK(np);
107389e0f4d2SKip Macy 		}
107489e0f4d2SKip Macy 
107589e0f4d2SKip Macy 		np->rx.rsp_cons = i;
107689e0f4d2SKip Macy 
107712678024SDoug Rabson #if __FreeBSD_version >= 700000
107812678024SDoug Rabson 		/*
107912678024SDoug Rabson 		 * Flush any outstanding LRO work
108012678024SDoug Rabson 		 */
108112678024SDoug Rabson 		while (!SLIST_EMPTY(&lro->lro_active)) {
108212678024SDoug Rabson 			queued = SLIST_FIRST(&lro->lro_active);
108312678024SDoug Rabson 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
108412678024SDoug Rabson 			tcp_lro_flush(lro, queued);
108512678024SDoug Rabson 		}
108612678024SDoug Rabson #endif
108712678024SDoug Rabson 
108889e0f4d2SKip Macy #if 0
108989e0f4d2SKip Macy 		/* If we get a callback with very few responses, reduce fill target. */
109089e0f4d2SKip Macy 		/* NB. Note exponential increase, linear decrease. */
109189e0f4d2SKip Macy 		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
109289e0f4d2SKip Macy 			((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
109389e0f4d2SKip Macy 			np->rx_target = np->rx_min_target;
109489e0f4d2SKip Macy #endif
109589e0f4d2SKip Macy 
109689e0f4d2SKip Macy 		network_alloc_rx_buffers(np);
109789e0f4d2SKip Macy 
109849906218SDoug Rabson 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
109949906218SDoug Rabson 	} while (work_to_do);
110089e0f4d2SKip Macy }
110189e0f4d2SKip Macy 
110289e0f4d2SKip Macy static void
110389e0f4d2SKip Macy xn_txeof(struct netfront_info *np)
110489e0f4d2SKip Macy {
110589e0f4d2SKip Macy 	RING_IDX i, prod;
110689e0f4d2SKip Macy 	unsigned short id;
110789e0f4d2SKip Macy 	struct ifnet *ifp;
110812678024SDoug Rabson 	netif_tx_response_t *txr;
110989e0f4d2SKip Macy 	struct mbuf *m;
111089e0f4d2SKip Macy 
111189e0f4d2SKip Macy 	XN_TX_LOCK_ASSERT(np);
111289e0f4d2SKip Macy 
111389e0f4d2SKip Macy 	if (!netfront_carrier_ok(np))
111489e0f4d2SKip Macy 		return;
111589e0f4d2SKip Macy 
111689e0f4d2SKip Macy 	ifp = np->xn_ifp;
111789e0f4d2SKip Macy 
111889e0f4d2SKip Macy 	do {
111989e0f4d2SKip Macy 		prod = np->tx.sring->rsp_prod;
112089e0f4d2SKip Macy 		rmb(); /* Ensure we see responses up to 'rp'. */
112189e0f4d2SKip Macy 
112289e0f4d2SKip Macy 		for (i = np->tx.rsp_cons; i != prod; i++) {
112312678024SDoug Rabson 			txr = RING_GET_RESPONSE(&np->tx, i);
112412678024SDoug Rabson 			if (txr->status == NETIF_RSP_NULL)
112512678024SDoug Rabson 				continue;
112612678024SDoug Rabson 
1127931eeffaSKenneth D. Merry 			if (txr->status != NETIF_RSP_OKAY) {
1128931eeffaSKenneth D. Merry 				printf("%s: WARNING: response is %d!\n",
1129931eeffaSKenneth D. Merry 				       __func__, txr->status);
1130931eeffaSKenneth D. Merry 			}
113112678024SDoug Rabson 			id = txr->id;
1132931eeffaSKenneth D. Merry 			m = np->tx_mbufs[id];
11332d8fae98SAdrian Chadd 			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
1134931eeffaSKenneth D. Merry 			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
1135931eeffaSKenneth D. Merry 				("mbuf already on the free list, but we're "
1136931eeffaSKenneth D. Merry 				"trying to free it again!"));
11372d8fae98SAdrian Chadd 			M_ASSERTVALID(m);
113889e0f4d2SKip Macy 
113912678024SDoug Rabson 			/*
114012678024SDoug Rabson 			 * Increment packet count if this is the last
114112678024SDoug Rabson 			 * mbuf of the chain.
114212678024SDoug Rabson 			 */
114312678024SDoug Rabson 			if (!m->m_next)
114489e0f4d2SKip Macy 				ifp->if_opackets++;
114589e0f4d2SKip Macy 			if (unlikely(gnttab_query_foreign_access(
114689e0f4d2SKip Macy 			    np->grant_tx_ref[id]) != 0)) {
1147931eeffaSKenneth D. Merry 				panic("grant id %u still in use by the backend",
1148931eeffaSKenneth D. Merry 				      id);
114989e0f4d2SKip Macy 			}
115089e0f4d2SKip Macy 			gnttab_end_foreign_access_ref(
1151920ba15bSKip Macy 				np->grant_tx_ref[id]);
115289e0f4d2SKip Macy 			gnttab_release_grant_reference(
115389e0f4d2SKip Macy 				&np->gref_tx_head, np->grant_tx_ref[id]);
1154ff662b5cSJustin T. Gibbs 			np->grant_tx_ref[id] = GRANT_REF_INVALID;
115589e0f4d2SKip Macy 
1156931eeffaSKenneth D. Merry 			np->tx_mbufs[id] = NULL;
1157931eeffaSKenneth D. Merry 			add_id_to_freelist(np->tx_mbufs, id);
1158a4ec37f5SAdrian Chadd 			np->xn_cdata.xn_tx_chain_cnt--;
115912678024SDoug Rabson 			m_free(m);
1160d76e4550SAdrian Chadd 			/* Only mark the queue active if we've freed up at least one slot to try */
1161d76e4550SAdrian Chadd 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
116289e0f4d2SKip Macy 		}
116389e0f4d2SKip Macy 		np->tx.rsp_cons = prod;
116489e0f4d2SKip Macy 
116589e0f4d2SKip Macy 		/*
116689e0f4d2SKip Macy 		 * Set a new event, then check for race with update of
116789e0f4d2SKip Macy 		 * tx_cons. Note that it is essential to schedule a
116889e0f4d2SKip Macy 		 * callback, no matter how few buffers are pending. Even if
116989e0f4d2SKip Macy 		 * there is space in the transmit ring, higher layers may
117089e0f4d2SKip Macy 		 * be blocked because too much data is outstanding: in such
117189e0f4d2SKip Macy 		 * cases notification from Xen is likely to be the only kick
117289e0f4d2SKip Macy 		 * that we'll get.
117389e0f4d2SKip Macy 		 */
117489e0f4d2SKip Macy 		np->tx.sring->rsp_event =
117589e0f4d2SKip Macy 		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
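		/*
		 * For example, with rsp_prod == 0 and req_prod == 100 the
		 * statement above sets rsp_event to 0 + (100 >> 1) + 1 = 51,
		 * so the backend will send its next notification once it has
		 * produced responses for roughly half of the outstanding
		 * requests.
		 */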
117689e0f4d2SKip Macy 
117789e0f4d2SKip Macy 		mb();
117889e0f4d2SKip Macy 	} while (prod != np->tx.sring->rsp_prod);
117989e0f4d2SKip Macy 
118089e0f4d2SKip Macy 	if (np->tx_full &&
118189e0f4d2SKip Macy 	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
118289e0f4d2SKip Macy 		np->tx_full = 0;
118389e0f4d2SKip Macy #if 0
118489e0f4d2SKip Macy 		if (np->user_state == UST_OPEN)
118589e0f4d2SKip Macy 			netif_wake_queue(dev);
118689e0f4d2SKip Macy #endif
118789e0f4d2SKip Macy 	}
118889e0f4d2SKip Macy 
118989e0f4d2SKip Macy }
119089e0f4d2SKip Macy 
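/*
 * Interrupt handler: reap any completed transmit responses, process
 * newly arrived receive responses, and restart transmission if the
 * interface is running and packets are still queued for output.
 */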
119189e0f4d2SKip Macy static void
119289e0f4d2SKip Macy xn_intr(void *xsc)
119389e0f4d2SKip Macy {
119489e0f4d2SKip Macy 	struct netfront_info *np = xsc;
119589e0f4d2SKip Macy 	struct ifnet *ifp = np->xn_ifp;
119689e0f4d2SKip Macy 
119789e0f4d2SKip Macy #if 0
119889e0f4d2SKip Macy 	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
119989e0f4d2SKip Macy 	    likely(netfront_carrier_ok(np)) &&
120089e0f4d2SKip Macy 	    ifp->if_drv_flags & IFF_DRV_RUNNING))
120189e0f4d2SKip Macy 		return;
120289e0f4d2SKip Macy #endif
1203931eeffaSKenneth D. Merry 	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
120489e0f4d2SKip Macy 		XN_TX_LOCK(np);
120589e0f4d2SKip Macy 		xn_txeof(np);
120689e0f4d2SKip Macy 		XN_TX_UNLOCK(np);
120789e0f4d2SKip Macy 	}
120889e0f4d2SKip Macy 
120989e0f4d2SKip Macy 	XN_RX_LOCK(np);
121089e0f4d2SKip Macy 	xn_rxeof(np);
121189e0f4d2SKip Macy 	XN_RX_UNLOCK(np);
121289e0f4d2SKip Macy 
121389e0f4d2SKip Macy 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
121489e0f4d2SKip Macy 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
121589e0f4d2SKip Macy 		xn_start(ifp);
121689e0f4d2SKip Macy }
121789e0f4d2SKip Macy 
121889e0f4d2SKip Macy 
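/*
 * Recycle an RX mbuf and its grant reference by re-posting them in the
 * next unused request slot on the RX ring, rather than freeing them.
 */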
121989e0f4d2SKip Macy static void
122089e0f4d2SKip Macy xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
122189e0f4d2SKip Macy 	grant_ref_t ref)
122289e0f4d2SKip Macy {
122389e0f4d2SKip Macy 	int new = xennet_rxidx(np->rx.req_prod_pvt);
122489e0f4d2SKip Macy 
122589e0f4d2SKip Macy 	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
122689e0f4d2SKip Macy 	np->rx_mbufs[new] = m;
122789e0f4d2SKip Macy 	np->grant_rx_ref[new] = ref;
122889e0f4d2SKip Macy 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
122989e0f4d2SKip Macy 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
123089e0f4d2SKip Macy 	np->rx.req_prod_pvt++;
123189e0f4d2SKip Macy }
123289e0f4d2SKip Macy 
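/*
 * Consume the chain of extra-info slots that follows a receive response
 * flagged with NETRXF_extra_info, copying each descriptor into the
 * caller's extras[] array (indexed by extra type) and recycling the
 * mbufs that backed those ring slots.
 */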
123389e0f4d2SKip Macy static int
123489e0f4d2SKip Macy xennet_get_extras(struct netfront_info *np,
1235931eeffaSKenneth D. Merry     struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
123689e0f4d2SKip Macy {
123789e0f4d2SKip Macy 	struct netif_extra_info *extra;
123889e0f4d2SKip Macy 
123989e0f4d2SKip Macy 	int err = 0;
124089e0f4d2SKip Macy 
124189e0f4d2SKip Macy 	do {
124289e0f4d2SKip Macy 		struct mbuf *m;
124389e0f4d2SKip Macy 		grant_ref_t ref;
124489e0f4d2SKip Macy 
1245931eeffaSKenneth D. Merry 		if (unlikely(*cons + 1 == rp)) {
124689e0f4d2SKip Macy #if 0
124789e0f4d2SKip Macy 			if (net_ratelimit())
124889e0f4d2SKip Macy 				WPRINTK("Missing extra info\n");
124989e0f4d2SKip Macy #endif
1250931eeffaSKenneth D. Merry 			err = EINVAL;
125189e0f4d2SKip Macy 			break;
125289e0f4d2SKip Macy 		}
125389e0f4d2SKip Macy 
125489e0f4d2SKip Macy 		extra = (struct netif_extra_info *)
1255931eeffaSKenneth D. Merry 		RING_GET_RESPONSE(&np->rx, ++(*cons));
125689e0f4d2SKip Macy 
125789e0f4d2SKip Macy 		if (unlikely(!extra->type ||
125889e0f4d2SKip Macy 			extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
125989e0f4d2SKip Macy #if 0
126089e0f4d2SKip Macy 			if (net_ratelimit())
126189e0f4d2SKip Macy 				WPRINTK("Invalid extra type: %d\n",
126289e0f4d2SKip Macy 					extra->type);
126389e0f4d2SKip Macy #endif
1264931eeffaSKenneth D. Merry 			err = EINVAL;
126589e0f4d2SKip Macy 		} else {
126689e0f4d2SKip Macy 			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
126789e0f4d2SKip Macy 		}
126889e0f4d2SKip Macy 
1269931eeffaSKenneth D. Merry 		m = xennet_get_rx_mbuf(np, *cons);
1270931eeffaSKenneth D. Merry 		ref = xennet_get_rx_ref(np, *cons);
127189e0f4d2SKip Macy 		xennet_move_rx_slot(np, m, ref);
127289e0f4d2SKip Macy 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
127389e0f4d2SKip Macy 
127489e0f4d2SKip Macy 	return err;
127589e0f4d2SKip Macy }
127689e0f4d2SKip Macy 
127789e0f4d2SKip Macy static int
127889e0f4d2SKip Macy xennet_get_responses(struct netfront_info *np,
1279931eeffaSKenneth D. Merry 	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
128083b92f6eSKip Macy 	struct mbuf  **list,
128189e0f4d2SKip Macy 	int *pages_flipped_p)
128289e0f4d2SKip Macy {
128389e0f4d2SKip Macy 	int pages_flipped = *pages_flipped_p;
128489e0f4d2SKip Macy 	struct mmu_update *mmu;
128589e0f4d2SKip Macy 	struct multicall_entry *mcl;
128689e0f4d2SKip Macy 	struct netif_rx_response *rx = &rinfo->rx;
128789e0f4d2SKip Macy 	struct netif_extra_info *extras = rinfo->extras;
128883b92f6eSKip Macy 	struct mbuf *m, *m0, *m_prev;
1289931eeffaSKenneth D. Merry 	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
1290931eeffaSKenneth D. Merry 	RING_IDX ref_cons = *cons;
129189e0f4d2SKip Macy 	int frags = 1;
129289e0f4d2SKip Macy 	int err = 0;
129389e0f4d2SKip Macy 	u_long ret;
129489e0f4d2SKip Macy 
1295931eeffaSKenneth D. Merry 	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);
129683b92f6eSKip Macy 
129783b92f6eSKip Macy 
129889e0f4d2SKip Macy 	if (rx->flags & NETRXF_extra_info) {
1299931eeffaSKenneth D. Merry 		err = xennet_get_extras(np, extras, rp, cons);
130089e0f4d2SKip Macy 	}
130189e0f4d2SKip Macy 
130283b92f6eSKip Macy 
130383b92f6eSKip Macy 	if (m0 != NULL) {
130483b92f6eSKip Macy 		m0->m_pkthdr.len = 0;
130583b92f6eSKip Macy 		m0->m_next = NULL;
130683b92f6eSKip Macy 	}
130783b92f6eSKip Macy 
130889e0f4d2SKip Macy 	for (;;) {
130989e0f4d2SKip Macy 		u_long mfn;
131089e0f4d2SKip Macy 
131183b92f6eSKip Macy #if 0
1312227ca257SKip Macy 		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
131383b92f6eSKip Macy 			rx->status, rx->offset, frags);
131483b92f6eSKip Macy #endif
131589e0f4d2SKip Macy 		if (unlikely(rx->status < 0 ||
131689e0f4d2SKip Macy 			rx->offset + rx->status > PAGE_SIZE)) {
1317931eeffaSKenneth D. Merry 
131889e0f4d2SKip Macy #if 0
131989e0f4d2SKip Macy 			if (net_ratelimit())
132089e0f4d2SKip Macy 				WPRINTK("rx->offset: %x, size: %u\n",
132189e0f4d2SKip Macy 					rx->offset, rx->status);
132289e0f4d2SKip Macy #endif
132389e0f4d2SKip Macy 			xennet_move_rx_slot(np, m, ref);
1324931eeffaSKenneth D. Merry 			if (m0 == m)
1325931eeffaSKenneth D. Merry 				m0 = NULL;
1326931eeffaSKenneth D. Merry 			m = NULL;
1327931eeffaSKenneth D. Merry 			err = EINVAL;
1328931eeffaSKenneth D. Merry 			goto next_skip_queue;
132989e0f4d2SKip Macy 		}
133089e0f4d2SKip Macy 
133189e0f4d2SKip Macy 		/*
133289e0f4d2SKip Macy 		 * This definitely indicates a bug, either in this driver or in
133389e0f4d2SKip Macy 		 * the backend driver. In the future this should flag the bad
133489e0f4d2SKip Macy 		 * situation to the system controller to reboot the backend.
133589e0f4d2SKip Macy 		 */
1336ff662b5cSJustin T. Gibbs 		if (ref == GRANT_REF_INVALID) {
1337931eeffaSKenneth D. Merry 
133889e0f4d2SKip Macy #if 0
133989e0f4d2SKip Macy 			if (net_ratelimit())
134089e0f4d2SKip Macy 				WPRINTK("Bad rx response id %d.\n", rx->id);
134189e0f4d2SKip Macy #endif
1342ff662b5cSJustin T. Gibbs 		printf("%s: Bad rx response id %d.\n", __func__, rx->id);
1343931eeffaSKenneth D. Merry 			err = EINVAL;
134489e0f4d2SKip Macy 			goto next;
134589e0f4d2SKip Macy 		}
134689e0f4d2SKip Macy 
134789e0f4d2SKip Macy 		if (!np->copying_receiver) {
134889e0f4d2SKip Macy 			/* Memory pressure, insufficient buffer
134989e0f4d2SKip Macy 			 * headroom, ...
135089e0f4d2SKip Macy 			 */
135189e0f4d2SKip Macy 			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
1352931eeffaSKenneth D. Merry 				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
135389e0f4d2SKip Macy 					rx->id, rx->status);
135489e0f4d2SKip Macy 				xennet_move_rx_slot(np, m, ref);
1355931eeffaSKenneth D. Merry 				err = ENOMEM;
135689e0f4d2SKip Macy 				goto next;
135789e0f4d2SKip Macy 			}
135889e0f4d2SKip Macy 
135989e0f4d2SKip Macy 			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
136089e0f4d2SKip Macy 				/* Remap the page. */
136189e0f4d2SKip Macy 				void *vaddr = mtod(m, void *);
136289e0f4d2SKip Macy 				uint32_t pfn;
136389e0f4d2SKip Macy 
136489e0f4d2SKip Macy 				mcl = np->rx_mcl + pages_flipped;
136589e0f4d2SKip Macy 				mmu = np->rx_mmu + pages_flipped;
136689e0f4d2SKip Macy 
136789e0f4d2SKip Macy 				MULTI_update_va_mapping(mcl, (u_long)vaddr,
13686ae0e31bSKip Macy 				    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
136989e0f4d2SKip Macy 				    PG_V | PG_M | PG_A, 0);
13703a6d1fcfSKip Macy 				pfn = (uintptr_t)m->m_ext.ext_arg1;
137189e0f4d2SKip Macy 				mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
137289e0f4d2SKip Macy 				    MMU_MACHPHYS_UPDATE;
137389e0f4d2SKip Macy 				mmu->val = pfn;
137489e0f4d2SKip Macy 
137589e0f4d2SKip Macy 				set_phys_to_machine(pfn, mfn);
137689e0f4d2SKip Macy 			}
137789e0f4d2SKip Macy 			pages_flipped++;
137889e0f4d2SKip Macy 		} else {
1379920ba15bSKip Macy 			ret = gnttab_end_foreign_access_ref(ref);
138089e0f4d2SKip Macy 			KASSERT(ret, ("ret != 0"));
138189e0f4d2SKip Macy 		}
138289e0f4d2SKip Macy 
138389e0f4d2SKip Macy 		gnttab_release_grant_reference(&np->gref_rx_head, ref);
138489e0f4d2SKip Macy 
138589e0f4d2SKip Macy next:
13863a539122SAdrian Chadd 		if (m == NULL)
13873a539122SAdrian Chadd 			break;
13883a539122SAdrian Chadd 
138983b92f6eSKip Macy 		m->m_len = rx->status;
139083b92f6eSKip Macy 		m->m_data += rx->offset;
139183b92f6eSKip Macy 		m0->m_pkthdr.len += rx->status;
139283b92f6eSKip Macy 
1393931eeffaSKenneth D. Merry next_skip_queue:
139489e0f4d2SKip Macy 		if (!(rx->flags & NETRXF_more_data))
139589e0f4d2SKip Macy 			break;
139689e0f4d2SKip Macy 
1397931eeffaSKenneth D. Merry 		if (*cons + frags == rp) {
139889e0f4d2SKip Macy 			if (net_ratelimit())
139989e0f4d2SKip Macy 				WPRINTK("Need more frags\n");
1400931eeffaSKenneth D. Merry 			err = ENOENT;
1401931eeffaSKenneth D. Merry 			printf("%s: cons %u frags %u rp %u, not enough frags\n",
1402931eeffaSKenneth D. Merry 			       __func__, *cons, frags, rp);
140389e0f4d2SKip Macy 			break;
140489e0f4d2SKip Macy 		}
1405931eeffaSKenneth D. Merry 		/*
1406931eeffaSKenneth D. Merry 		 * Note that m can be NULL, if rx->status < 0 or if
1407931eeffaSKenneth D. Merry 		 * rx->offset + rx->status > PAGE_SIZE above.
1408931eeffaSKenneth D. Merry 		 */
140983b92f6eSKip Macy 		m_prev = m;
141089e0f4d2SKip Macy 
1411931eeffaSKenneth D. Merry 		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
1412931eeffaSKenneth D. Merry 		m = xennet_get_rx_mbuf(np, *cons + frags);
141383b92f6eSKip Macy 
1414931eeffaSKenneth D. Merry 		/*
1415931eeffaSKenneth D. Merry 		 * m_prev == NULL can happen if rx->status < 0 or if
1416931eeffaSKenneth D. Merry 		 * rx->offset + rx->status > PAGE_SIZE above.
1417931eeffaSKenneth D. Merry 		 */
1418931eeffaSKenneth D. Merry 		if (m_prev != NULL)
141983b92f6eSKip Macy 			m_prev->m_next = m;
1420931eeffaSKenneth D. Merry 
1421931eeffaSKenneth D. Merry 		/*
1422931eeffaSKenneth D. Merry 		 * m0 can be NULL if rx->status < 0 or if rx->offset +
1423931eeffaSKenneth D. Merry 		 * rx->status > PAGE_SIZE above.
1424931eeffaSKenneth D. Merry 		 */
1425931eeffaSKenneth D. Merry 		if (m0 == NULL)
1426931eeffaSKenneth D. Merry 			m0 = m;
142783b92f6eSKip Macy 		m->m_next = NULL;
1428931eeffaSKenneth D. Merry 		ref = xennet_get_rx_ref(np, *cons + frags);
1429931eeffaSKenneth D. Merry 		ref_cons = *cons + frags;
143089e0f4d2SKip Macy 		frags++;
143189e0f4d2SKip Macy 	}
143283b92f6eSKip Macy 	*list = m0;
1433931eeffaSKenneth D. Merry 	*cons += frags;
143489e0f4d2SKip Macy 	*pages_flipped_p = pages_flipped;
143589e0f4d2SKip Macy 
14368577146eSJustin T. Gibbs 	return (err);
143789e0f4d2SKip Macy }
143889e0f4d2SKip Macy 
143989e0f4d2SKip Macy static void
144089e0f4d2SKip Macy xn_tick_locked(struct netfront_info *sc)
144189e0f4d2SKip Macy {
144289e0f4d2SKip Macy 	XN_RX_LOCK_ASSERT(sc);
144389e0f4d2SKip Macy 	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
144489e0f4d2SKip Macy 
144589e0f4d2SKip Macy 	/* XXX placeholder for printing debug information */
144689e0f4d2SKip Macy 
144789e0f4d2SKip Macy }
144889e0f4d2SKip Macy 
144989e0f4d2SKip Macy 
145089e0f4d2SKip Macy static void
145189e0f4d2SKip Macy xn_tick(void *xsc)
145289e0f4d2SKip Macy {
145389e0f4d2SKip Macy 	struct netfront_info *sc;
145489e0f4d2SKip Macy 
145589e0f4d2SKip Macy 	sc = xsc;
145689e0f4d2SKip Macy 	XN_RX_LOCK(sc);
145789e0f4d2SKip Macy 	xn_tick_locked(sc);
145889e0f4d2SKip Macy 	XN_RX_UNLOCK(sc);
145989e0f4d2SKip Macy 
146089e0f4d2SKip Macy }
146189e0f4d2SKip Macy 
1462931eeffaSKenneth D. Merry /**
1463931eeffaSKenneth D. Merry  * \brief Count the number of fragments in an mbuf chain.
1464931eeffaSKenneth D. Merry  *
1465931eeffaSKenneth D. Merry  * Surprisingly, there isn't an M* macro for this.
1466c099cafaSAdrian Chadd  */
1467931eeffaSKenneth D. Merry static inline int
1468931eeffaSKenneth D. Merry xn_count_frags(struct mbuf *m)
1469931eeffaSKenneth D. Merry {
1470931eeffaSKenneth D. Merry 	int nfrags;
1471931eeffaSKenneth D. Merry 
1472931eeffaSKenneth D. Merry 	for (nfrags = 0; m != NULL; m = m->m_next)
1473931eeffaSKenneth D. Merry 		nfrags++;
1474931eeffaSKenneth D. Merry 
1475931eeffaSKenneth D. Merry 	return (nfrags);
147689e0f4d2SKip Macy }
147789e0f4d2SKip Macy 
1478931eeffaSKenneth D. Merry /**
1479931eeffaSKenneth D. Merry  * Given an mbuf chain, make sure we have enough room and then push
1480931eeffaSKenneth D. Merry  * it onto the transmit ring.
1481931eeffaSKenneth D. Merry  */
1482931eeffaSKenneth D. Merry static int
1483931eeffaSKenneth D. Merry xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
1484931eeffaSKenneth D. Merry {
1485931eeffaSKenneth D. Merry 	struct ifnet *ifp;
1486931eeffaSKenneth D. Merry 	struct mbuf *m;
1487931eeffaSKenneth D. Merry 	u_int nfrags;
1488931eeffaSKenneth D. Merry 	netif_extra_info_t *extra;
1489931eeffaSKenneth D. Merry 	int otherend_id;
1490931eeffaSKenneth D. Merry 
1491931eeffaSKenneth D. Merry 	ifp = sc->xn_ifp;
1492931eeffaSKenneth D. Merry 
1493931eeffaSKenneth D. Merry 	/*
149412678024SDoug Rabson 	 * Count the fragments in the chain; defragment below if necessary.
149512678024SDoug Rabson 	 */
1496931eeffaSKenneth D. Merry 	nfrags = xn_count_frags(m_head);
1497931eeffaSKenneth D. Merry 
1498931eeffaSKenneth D. Merry 	/*
1499931eeffaSKenneth D. Merry 	 * Check to see whether this request is longer than netback
1500931eeffaSKenneth D. Merry 	 * can handle, and try to defrag it.
1501931eeffaSKenneth D. Merry 	 *
1503931eeffaSKenneth D. Merry 	 * It is a bit lame, but the netback driver in Linux can't
1504931eeffaSKenneth D. Merry 	 * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
1505931eeffaSKenneth D. Merry 	 * the Linux network stack.
1506931eeffaSKenneth D. Merry 	 */
1507931eeffaSKenneth D. Merry 	if (nfrags > MAX_TX_REQ_FRAGS) {
150812678024SDoug Rabson 		m = m_defrag(m_head, M_DONTWAIT);
150912678024SDoug Rabson 		if (!m) {
1510931eeffaSKenneth D. Merry 			/*
1511931eeffaSKenneth D. Merry 			 * Defrag failed, so free the mbuf and
1512931eeffaSKenneth D. Merry 			 * therefore drop the packet.
1513931eeffaSKenneth D. Merry 			 */
151412678024SDoug Rabson 			m_freem(m_head);
1515931eeffaSKenneth D. Merry 			return (EMSGSIZE);
151612678024SDoug Rabson 		}
151712678024SDoug Rabson 		m_head = m;
151812678024SDoug Rabson 	}
151989e0f4d2SKip Macy 
1520a4ec37f5SAdrian Chadd 	/* Determine how many fragments now exist */
1521931eeffaSKenneth D. Merry 	nfrags = xn_count_frags(m_head);
1522a4ec37f5SAdrian Chadd 
1523a4ec37f5SAdrian Chadd 	/*
1524931eeffaSKenneth D. Merry 	 * Check to see whether the defragmented packet has too many
1525931eeffaSKenneth D. Merry 	 * segments for the Linux netback driver.
1526a4ec37f5SAdrian Chadd 	 *
1528931eeffaSKenneth D. Merry 	 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
1529931eeffaSKenneth D. Merry 	 * of mbufs longer than Linux can handle.  Make sure we don't
1530931eeffaSKenneth D. Merry 	 * pass a too-long chain over to the other side by dropping the
1531931eeffaSKenneth D. Merry 	 * packet.  It doesn't look like there is currently a way to
1532931eeffaSKenneth D. Merry 	 * tell the TCP stack to generate a shorter chain of mbufs.
15333fb28bbbSAdrian Chadd 	 */
1534931eeffaSKenneth D. Merry 	if (nfrags > MAX_TX_REQ_FRAGS) {
1535ff662b5cSJustin T. Gibbs #ifdef DEBUG
1536ff662b5cSJustin T. Gibbs 		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
1537ff662b5cSJustin T. Gibbs 		       "won't be able to handle it, dropping\n",
1538ff662b5cSJustin T. Gibbs 		       __func__, nfrags, MAX_TX_REQ_FRAGS);
1539ff662b5cSJustin T. Gibbs #endif
1540931eeffaSKenneth D. Merry 		m_freem(m_head);
1541931eeffaSKenneth D. Merry 		return (EMSGSIZE);
1542a4ec37f5SAdrian Chadd 	}
1543a4ec37f5SAdrian Chadd 
15443fb28bbbSAdrian Chadd 	/*
1545931eeffaSKenneth D. Merry 	 * This check should be redundant.  We've already verified that we
1546931eeffaSKenneth D. Merry 	 * have enough slots in the ring to handle a packet of maximum
1547931eeffaSKenneth D. Merry 	 * size, and that our packet is less than the maximum size.  Keep
1548931eeffaSKenneth D. Merry 	 * it in here as an assert for now just to make certain that
1549931eeffaSKenneth D. Merry 	 * xn_tx_chain_cnt is accurate.
15503fb28bbbSAdrian Chadd 	 */
1551931eeffaSKenneth D. Merry 	KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
1552931eeffaSKenneth D. Merry 		("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
1553931eeffaSKenneth D. Merry 		 "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
1554931eeffaSKenneth D. Merry                     (int) nfrags, (int) NET_TX_RING_SIZE));
1555a4ec37f5SAdrian Chadd 
155689e0f4d2SKip Macy 	/*
155789e0f4d2SKip Macy 	 * Start packing the mbufs in this chain into
155889e0f4d2SKip Macy 	 * the fragment pointers. Stop when we run out
155989e0f4d2SKip Macy 	 * of fragments or hit the end of the mbuf chain.
156089e0f4d2SKip Macy 	 */
156112678024SDoug Rabson 	m = m_head;
156212678024SDoug Rabson 	extra = NULL;
1563931eeffaSKenneth D. Merry 	otherend_id = xenbus_get_otherend_id(sc->xbdev);
156412678024SDoug Rabson 	for (m = m_head; m; m = m->m_next) {
1565931eeffaSKenneth D. Merry 		netif_tx_request_t *tx;
1566931eeffaSKenneth D. Merry 		uintptr_t id;
1567931eeffaSKenneth D. Merry 		grant_ref_t ref;
1568931eeffaSKenneth D. Merry 		u_long mfn; /* XXX Wrong type? */
1569931eeffaSKenneth D. Merry 
1570931eeffaSKenneth D. Merry 		tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
1571931eeffaSKenneth D. Merry 		id = get_id_from_freelist(sc->tx_mbufs);
1572a4ec37f5SAdrian Chadd 		if (id == 0)
1573a4ec37f5SAdrian Chadd 			panic("%s: was allocated the freelist head!\n", __func__);
1574a4ec37f5SAdrian Chadd 		sc->xn_cdata.xn_tx_chain_cnt++;
1575931eeffaSKenneth D. Merry 		if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
1576931eeffaSKenneth D. Merry 			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n", __func__);
1577931eeffaSKenneth D. Merry 		sc->tx_mbufs[id] = m;
157889e0f4d2SKip Macy 		tx->id = id;
157989e0f4d2SKip Macy 		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
158089e0f4d2SKip Macy 		KASSERT((short)ref >= 0, ("Negative ref"));
158112678024SDoug Rabson 		mfn = virt_to_mfn(mtod(m, vm_offset_t));
158223dc5621SKip Macy 		gnttab_grant_foreign_access_ref(ref, otherend_id,
158389e0f4d2SKip Macy 		    mfn, GNTMAP_readonly);
158489e0f4d2SKip Macy 		tx->gref = sc->grant_tx_ref[id] = ref;
158512678024SDoug Rabson 		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
158689e0f4d2SKip Macy 		tx->flags = 0;
158712678024SDoug Rabson 		if (m == m_head) {
158812678024SDoug Rabson 			/*
158912678024SDoug Rabson 			 * The first fragment has the entire packet
159012678024SDoug Rabson 			 * size, subsequent fragments have just the
159112678024SDoug Rabson 			 * fragment size. The backend works out the
159212678024SDoug Rabson 			 * true size of the first fragment by
159312678024SDoug Rabson 			 * subtracting the sizes of the other
159412678024SDoug Rabson 			 * fragments.
159512678024SDoug Rabson 			 */
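			/*
			 * For example (illustrative numbers only): a
			 * 1514-byte packet carried in mbufs of 1000 and
			 * 514 bytes is posted with tx->size = 1514 in the
			 * first request and 514 in the second, and the
			 * backend recovers the first fragment's true
			 * length as 1514 - 514 = 1000.
			 */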
159612678024SDoug Rabson 			tx->size = m->m_pkthdr.len;
159789e0f4d2SKip Macy 
159812678024SDoug Rabson 			/*
1599931eeffaSKenneth D. Merry 			 * The first fragment contains the checksum flags
1600931eeffaSKenneth D. Merry 			 * and is optionally followed by extra data for
1601931eeffaSKenneth D. Merry 			 * TSO etc.
1602931eeffaSKenneth D. Merry 			 *
1604931eeffaSKenneth D. Merry 			 * CSUM_TSO requires checksum offloading.
1605931eeffaSKenneth D. Merry 			 * Some versions of FreeBSD fail to
1606931eeffaSKenneth D. Merry 			 * set CSUM_TCP in the CSUM_TSO case,
1607931eeffaSKenneth D. Merry 			 * so we have to test for CSUM_TSO
1608931eeffaSKenneth D. Merry 			 * explicitly.
160912678024SDoug Rabson 			 */
161012678024SDoug Rabson 			if (m->m_pkthdr.csum_flags
1611931eeffaSKenneth D. Merry 			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
161212678024SDoug Rabson 				tx->flags |= (NETTXF_csum_blank
161312678024SDoug Rabson 				    | NETTXF_data_validated);
161412678024SDoug Rabson 			}
161512678024SDoug Rabson #if __FreeBSD_version >= 700000
161612678024SDoug Rabson 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
161712678024SDoug Rabson 				struct netif_extra_info *gso =
161812678024SDoug Rabson 					(struct netif_extra_info *)
1619931eeffaSKenneth D. Merry 					RING_GET_REQUEST(&sc->tx,
1620931eeffaSKenneth D. Merry 							 ++sc->tx.req_prod_pvt);
162189e0f4d2SKip Macy 
162212678024SDoug Rabson 				tx->flags |= NETTXF_extra_info;
162389e0f4d2SKip Macy 
162412678024SDoug Rabson 				gso->u.gso.size = m->m_pkthdr.tso_segsz;
162512678024SDoug Rabson 				gso->u.gso.type =
162612678024SDoug Rabson 					XEN_NETIF_GSO_TYPE_TCPV4;
162712678024SDoug Rabson 				gso->u.gso.pad = 0;
162812678024SDoug Rabson 				gso->u.gso.features = 0;
162912678024SDoug Rabson 
163012678024SDoug Rabson 				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
163112678024SDoug Rabson 				gso->flags = 0;
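				/*
				 * Note that this extra-info descriptor takes
				 * up a ring slot of its own but has no mbuf
				 * id or grant reference attached; presumably
				 * this is the case that the NETIF_RSP_NULL
				 * check in xn_txeof() skips over.
				 */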
163212678024SDoug Rabson 			}
163312678024SDoug Rabson #endif
163412678024SDoug Rabson 		} else {
163512678024SDoug Rabson 			tx->size = m->m_len;
163612678024SDoug Rabson 		}
1637931eeffaSKenneth D. Merry 		if (m->m_next)
163812678024SDoug Rabson 			tx->flags |= NETTXF_more_data;
163912678024SDoug Rabson 
1640931eeffaSKenneth D. Merry 		sc->tx.req_prod_pvt++;
1641931eeffaSKenneth D. Merry 	}
164212678024SDoug Rabson 	BPF_MTAP(ifp, m_head);
164312678024SDoug Rabson 
164412678024SDoug Rabson 	sc->stats.tx_bytes += m_head->m_pkthdr.len;
164589e0f4d2SKip Macy 	sc->stats.tx_packets++;
1646931eeffaSKenneth D. Merry 
1647931eeffaSKenneth D. Merry 	return (0);
164889e0f4d2SKip Macy }
164989e0f4d2SKip Macy 
1650931eeffaSKenneth D. Merry static void
1651931eeffaSKenneth D. Merry xn_start_locked(struct ifnet *ifp)
1652931eeffaSKenneth D. Merry {
1653931eeffaSKenneth D. Merry 	struct netfront_info *sc;
1654931eeffaSKenneth D. Merry 	struct mbuf *m_head;
1655931eeffaSKenneth D. Merry 	int notify;
1656931eeffaSKenneth D. Merry 
1657931eeffaSKenneth D. Merry 	sc = ifp->if_softc;
1658931eeffaSKenneth D. Merry 
1659931eeffaSKenneth D. Merry 	if (!netfront_carrier_ok(sc))
1660931eeffaSKenneth D. Merry 		return;
1661931eeffaSKenneth D. Merry 
1662931eeffaSKenneth D. Merry 	/*
1663931eeffaSKenneth D. Merry 	 * While we have enough transmit slots available for at least one
1664931eeffaSKenneth D. Merry 	 * maximum-sized packet, pull mbufs off the queue and put them on
1665931eeffaSKenneth D. Merry 	 * the transmit ring.
1666931eeffaSKenneth D. Merry 	 */
1667931eeffaSKenneth D. Merry 	while (xn_tx_slot_available(sc)) {
1668931eeffaSKenneth D. Merry 		IF_DEQUEUE(&ifp->if_snd, m_head);
1669931eeffaSKenneth D. Merry 		if (m_head == NULL)
1670931eeffaSKenneth D. Merry 			break;
1671931eeffaSKenneth D. Merry 
1672931eeffaSKenneth D. Merry 		if (xn_assemble_tx_request(sc, m_head) != 0)
1673931eeffaSKenneth D. Merry 			break;
1674931eeffaSKenneth D. Merry 	}
1675931eeffaSKenneth D. Merry 
167689e0f4d2SKip Macy 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
167789e0f4d2SKip Macy 	if (notify)
167889e0f4d2SKip Macy 		notify_remote_via_irq(sc->irq);
167989e0f4d2SKip Macy 
168089e0f4d2SKip Macy 	if (RING_FULL(&sc->tx)) {
168189e0f4d2SKip Macy 		sc->tx_full = 1;
168289e0f4d2SKip Macy #if 0
168389e0f4d2SKip Macy 		netif_stop_queue(dev);
168489e0f4d2SKip Macy #endif
168589e0f4d2SKip Macy 	}
168689e0f4d2SKip Macy }
168789e0f4d2SKip Macy 
1688931eeffaSKenneth D. Merry 
168989e0f4d2SKip Macy static void
169089e0f4d2SKip Macy xn_start(struct ifnet *ifp)
169189e0f4d2SKip Macy {
169289e0f4d2SKip Macy 	struct netfront_info *sc;
169389e0f4d2SKip Macy 	sc = ifp->if_softc;
169489e0f4d2SKip Macy 	XN_TX_LOCK(sc);
169589e0f4d2SKip Macy 	xn_start_locked(ifp);
169689e0f4d2SKip Macy 	XN_TX_UNLOCK(sc);
169789e0f4d2SKip Macy }
169889e0f4d2SKip Macy 
169989e0f4d2SKip Macy /* equivalent of network_open() in Linux */
170089e0f4d2SKip Macy static void
170189e0f4d2SKip Macy xn_ifinit_locked(struct netfront_info *sc)
170289e0f4d2SKip Macy {
170389e0f4d2SKip Macy 	struct ifnet *ifp;
170489e0f4d2SKip Macy 
170589e0f4d2SKip Macy 	XN_LOCK_ASSERT(sc);
170689e0f4d2SKip Macy 
170789e0f4d2SKip Macy 	ifp = sc->xn_ifp;
170889e0f4d2SKip Macy 
170989e0f4d2SKip Macy 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
171089e0f4d2SKip Macy 		return;
171189e0f4d2SKip Macy 
171289e0f4d2SKip Macy 	xn_stop(sc);
171389e0f4d2SKip Macy 
171489e0f4d2SKip Macy 	network_alloc_rx_buffers(sc);
171589e0f4d2SKip Macy 	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;
171689e0f4d2SKip Macy 
171789e0f4d2SKip Macy 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
171889e0f4d2SKip Macy 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
17190e509842SJustin T. Gibbs 	if_link_state_change(ifp, LINK_STATE_UP);
172089e0f4d2SKip Macy 
172189e0f4d2SKip Macy 	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
172289e0f4d2SKip Macy 
172389e0f4d2SKip Macy }
172489e0f4d2SKip Macy 
172589e0f4d2SKip Macy 
172689e0f4d2SKip Macy static void
172789e0f4d2SKip Macy xn_ifinit(void *xsc)
172889e0f4d2SKip Macy {
172989e0f4d2SKip Macy 	struct netfront_info *sc = xsc;
173089e0f4d2SKip Macy 
173189e0f4d2SKip Macy 	XN_LOCK(sc);
173289e0f4d2SKip Macy 	xn_ifinit_locked(sc);
173389e0f4d2SKip Macy 	XN_UNLOCK(sc);
173489e0f4d2SKip Macy 
173589e0f4d2SKip Macy }
173689e0f4d2SKip Macy 
173789e0f4d2SKip Macy 
173889e0f4d2SKip Macy static int
173989e0f4d2SKip Macy xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
174089e0f4d2SKip Macy {
174189e0f4d2SKip Macy 	struct netfront_info *sc = ifp->if_softc;
174289e0f4d2SKip Macy 	struct ifreq *ifr = (struct ifreq *) data;
1743a0ae8f04SBjoern A. Zeeb #ifdef INET
174489e0f4d2SKip Macy 	struct ifaddr *ifa = (struct ifaddr *)data;
1745a0ae8f04SBjoern A. Zeeb #endif
174689e0f4d2SKip Macy 
174789e0f4d2SKip Macy 	int mask, error = 0;
174889e0f4d2SKip Macy 	switch(cmd) {
174989e0f4d2SKip Macy 	case SIOCSIFADDR:
175089e0f4d2SKip Macy 	case SIOCGIFADDR:
1751a0ae8f04SBjoern A. Zeeb #ifdef INET
175289e0f4d2SKip Macy 		XN_LOCK(sc);
175389e0f4d2SKip Macy 		if (ifa->ifa_addr->sa_family == AF_INET) {
175489e0f4d2SKip Macy 			ifp->if_flags |= IFF_UP;
175589e0f4d2SKip Macy 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
175689e0f4d2SKip Macy 				xn_ifinit_locked(sc);
175789e0f4d2SKip Macy 			arp_ifinit(ifp, ifa);
175889e0f4d2SKip Macy 			XN_UNLOCK(sc);
175949906218SDoug Rabson 		} else {
176049906218SDoug Rabson 			XN_UNLOCK(sc);
1761a0ae8f04SBjoern A. Zeeb #endif
176249906218SDoug Rabson 			error = ether_ioctl(ifp, cmd, data);
1763a0ae8f04SBjoern A. Zeeb #ifdef INET
176449906218SDoug Rabson 		}
1765a0ae8f04SBjoern A. Zeeb #endif
176689e0f4d2SKip Macy 		break;
176789e0f4d2SKip Macy 	case SIOCSIFMTU:
176889e0f4d2SKip Macy 		/* XXX can we alter the MTU on a VN? */
176989e0f4d2SKip Macy #ifdef notyet
177089e0f4d2SKip Macy 		if (ifr->ifr_mtu > XN_JUMBO_MTU)
177189e0f4d2SKip Macy 			error = EINVAL;
177289e0f4d2SKip Macy 		else
177389e0f4d2SKip Macy #endif
177489e0f4d2SKip Macy 		{
177589e0f4d2SKip Macy 			ifp->if_mtu = ifr->ifr_mtu;
177689e0f4d2SKip Macy 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
177789e0f4d2SKip Macy 			xn_ifinit(sc);
177889e0f4d2SKip Macy 		}
177989e0f4d2SKip Macy 		break;
178089e0f4d2SKip Macy 	case SIOCSIFFLAGS:
178189e0f4d2SKip Macy 		XN_LOCK(sc);
178289e0f4d2SKip Macy 		if (ifp->if_flags & IFF_UP) {
178389e0f4d2SKip Macy 			/*
178489e0f4d2SKip Macy 			 * If only the state of the PROMISC flag changed,
178589e0f4d2SKip Macy 			 * then just use the 'set promisc mode' command
178689e0f4d2SKip Macy 			 * instead of reinitializing the entire NIC. Doing
178789e0f4d2SKip Macy 			 * a full re-init means reloading the firmware and
178889e0f4d2SKip Macy 			 * waiting for it to start up, which may take a
178989e0f4d2SKip Macy 			 * second or two.
179089e0f4d2SKip Macy 			 */
179189e0f4d2SKip Macy #ifdef notyet
179289e0f4d2SKip Macy 			/* No promiscuous mode with Xen */
179389e0f4d2SKip Macy 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
179489e0f4d2SKip Macy 			    ifp->if_flags & IFF_PROMISC &&
179589e0f4d2SKip Macy 			    !(sc->xn_if_flags & IFF_PROMISC)) {
179689e0f4d2SKip Macy 				XN_SETBIT(sc, XN_RX_MODE,
179789e0f4d2SKip Macy 					  XN_RXMODE_RX_PROMISC);
179889e0f4d2SKip Macy 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
179989e0f4d2SKip Macy 				   !(ifp->if_flags & IFF_PROMISC) &&
180089e0f4d2SKip Macy 				   sc->xn_if_flags & IFF_PROMISC) {
180189e0f4d2SKip Macy 				XN_CLRBIT(sc, XN_RX_MODE,
180289e0f4d2SKip Macy 					  XN_RXMODE_RX_PROMISC);
180389e0f4d2SKip Macy 			} else
180489e0f4d2SKip Macy #endif
180589e0f4d2SKip Macy 				xn_ifinit_locked(sc);
180689e0f4d2SKip Macy 		} else {
180789e0f4d2SKip Macy 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
180889e0f4d2SKip Macy 				xn_stop(sc);
180989e0f4d2SKip Macy 			}
181089e0f4d2SKip Macy 		}
181189e0f4d2SKip Macy 		sc->xn_if_flags = ifp->if_flags;
181289e0f4d2SKip Macy 		XN_UNLOCK(sc);
181389e0f4d2SKip Macy 		error = 0;
181489e0f4d2SKip Macy 		break;
181589e0f4d2SKip Macy 	case SIOCSIFCAP:
181689e0f4d2SKip Macy 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
181712678024SDoug Rabson 		if (mask & IFCAP_TXCSUM) {
181812678024SDoug Rabson 			if (IFCAP_TXCSUM & ifp->if_capenable) {
181912678024SDoug Rabson 				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
182012678024SDoug Rabson 				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
182112678024SDoug Rabson 				    | CSUM_IP | CSUM_TSO);
182212678024SDoug Rabson 			} else {
182312678024SDoug Rabson 				ifp->if_capenable |= IFCAP_TXCSUM;
182412678024SDoug Rabson 				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
182512678024SDoug Rabson 				    | CSUM_IP);
182689e0f4d2SKip Macy 			}
182712678024SDoug Rabson 		}
182812678024SDoug Rabson 		if (mask & IFCAP_RXCSUM) {
182912678024SDoug Rabson 			ifp->if_capenable ^= IFCAP_RXCSUM;
183012678024SDoug Rabson 		}
183112678024SDoug Rabson #if __FreeBSD_version >= 700000
183212678024SDoug Rabson 		if (mask & IFCAP_TSO4) {
183312678024SDoug Rabson 			if (IFCAP_TSO4 & ifp->if_capenable) {
183412678024SDoug Rabson 				ifp->if_capenable &= ~IFCAP_TSO4;
183512678024SDoug Rabson 				ifp->if_hwassist &= ~CSUM_TSO;
183612678024SDoug Rabson 			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
183712678024SDoug Rabson 				ifp->if_capenable |= IFCAP_TSO4;
183812678024SDoug Rabson 				ifp->if_hwassist |= CSUM_TSO;
183912678024SDoug Rabson 			} else {
18403552092bSAdrian Chadd 				IPRINTK("Xen requires tx checksum offload"
184112678024SDoug Rabson 				    " be enabled to use TSO\n");
184212678024SDoug Rabson 				error = EINVAL;
184312678024SDoug Rabson 			}
184412678024SDoug Rabson 		}
184512678024SDoug Rabson 		if (mask & IFCAP_LRO) {
184612678024SDoug Rabson 			ifp->if_capenable ^= IFCAP_LRO;
184712678024SDoug Rabson 
184812678024SDoug Rabson 		}
184912678024SDoug Rabson #endif
185089e0f4d2SKip Macy 		error = 0;
185189e0f4d2SKip Macy 		break;
185289e0f4d2SKip Macy 	case SIOCADDMULTI:
185389e0f4d2SKip Macy 	case SIOCDELMULTI:
185489e0f4d2SKip Macy #ifdef notyet
185589e0f4d2SKip Macy 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
185689e0f4d2SKip Macy 			XN_LOCK(sc);
185789e0f4d2SKip Macy 			xn_setmulti(sc);
185889e0f4d2SKip Macy 			XN_UNLOCK(sc);
185989e0f4d2SKip Macy 			error = 0;
186089e0f4d2SKip Macy 		}
186189e0f4d2SKip Macy #endif
186289e0f4d2SKip Macy 		/* FALLTHROUGH */
186389e0f4d2SKip Macy 	case SIOCSIFMEDIA:
186489e0f4d2SKip Macy 	case SIOCGIFMEDIA:
18650e509842SJustin T. Gibbs 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
186689e0f4d2SKip Macy 		break;
186789e0f4d2SKip Macy 	default:
186889e0f4d2SKip Macy 		error = ether_ioctl(ifp, cmd, data);
186989e0f4d2SKip Macy 	}
187089e0f4d2SKip Macy 
187189e0f4d2SKip Macy 	return (error);
187289e0f4d2SKip Macy }
187389e0f4d2SKip Macy 
187489e0f4d2SKip Macy static void
187589e0f4d2SKip Macy xn_stop(struct netfront_info *sc)
187689e0f4d2SKip Macy {
187789e0f4d2SKip Macy 	struct ifnet *ifp;
187889e0f4d2SKip Macy 
187989e0f4d2SKip Macy 	XN_LOCK_ASSERT(sc);
188089e0f4d2SKip Macy 
188189e0f4d2SKip Macy 	ifp = sc->xn_ifp;
188289e0f4d2SKip Macy 
188389e0f4d2SKip Macy 	callout_stop(&sc->xn_stat_ch);
188489e0f4d2SKip Macy 
188589e0f4d2SKip Macy 	xn_free_rx_ring(sc);
188689e0f4d2SKip Macy 	xn_free_tx_ring(sc);
188789e0f4d2SKip Macy 
188889e0f4d2SKip Macy 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
18890e509842SJustin T. Gibbs 	if_link_state_change(ifp, LINK_STATE_DOWN);
189089e0f4d2SKip Macy }
189189e0f4d2SKip Macy 
189289e0f4d2SKip Macy /* START of Xenolinux helper functions adapted to FreeBSD */
189323dc5621SKip Macy int
189423dc5621SKip Macy network_connect(struct netfront_info *np)
189589e0f4d2SKip Macy {
18963a6d1fcfSKip Macy 	int i, requeue_idx, error;
189789e0f4d2SKip Macy 	grant_ref_t ref;
189889e0f4d2SKip Macy 	netif_rx_request_t *req;
189989e0f4d2SKip Macy 	u_int feature_rx_copy, feature_rx_flip;
190089e0f4d2SKip Macy 
1901ff662b5cSJustin T. Gibbs 	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
19023a6d1fcfSKip Macy 	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
19033a6d1fcfSKip Macy 	if (error)
190489e0f4d2SKip Macy 		feature_rx_copy = 0;
1905ff662b5cSJustin T. Gibbs 	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
19063a6d1fcfSKip Macy 	    "feature-rx-flip", NULL, "%u", &feature_rx_flip);
19073a6d1fcfSKip Macy 	if (error)
190889e0f4d2SKip Macy 		feature_rx_flip = 1;
190989e0f4d2SKip Macy 
191089e0f4d2SKip Macy 	/*
191189e0f4d2SKip Macy 	 * Copy packets on receive path if:
191289e0f4d2SKip Macy 	 *  (a) This was requested by user, and the backend supports it; or
191389e0f4d2SKip Macy 	 *  (b) Flipping was requested, but this is unsupported by the backend.
191489e0f4d2SKip Macy 	 */
191589e0f4d2SKip Macy 	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
191689e0f4d2SKip Macy 				(MODPARM_rx_flip && !feature_rx_flip));
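	/*
	 * Put differently: rx_copy requested and supported by the backend
	 * means copy; rx_flip requested but not supported by the backend
	 * also falls back to copy; otherwise the page-flipping path below
	 * is used.
	 */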
191789e0f4d2SKip Macy 
191889e0f4d2SKip Macy 	/* Recovery procedure: */
19193a6d1fcfSKip Macy 	error = talk_to_backend(np->xbdev, np);
19203a6d1fcfSKip Macy 	if (error)
19213a6d1fcfSKip Macy 		return (error);
192289e0f4d2SKip Macy 
192389e0f4d2SKip Macy 	/* Step 1: Reinitialise variables. */
192489e0f4d2SKip Macy 	netif_release_tx_bufs(np);
192589e0f4d2SKip Macy 
192689e0f4d2SKip Macy 	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1927*cf9c09e1SJustin T. Gibbs 	xn_configure_lro(np);
192889e0f4d2SKip Macy 	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
192989e0f4d2SKip Macy 		struct mbuf *m;
19303a6d1fcfSKip Macy 		u_long pfn;
193189e0f4d2SKip Macy 
193289e0f4d2SKip Macy 		if (np->rx_mbufs[i] == NULL)
193389e0f4d2SKip Macy 			continue;
193489e0f4d2SKip Macy 
193589e0f4d2SKip Macy 		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
193689e0f4d2SKip Macy 		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1937931eeffaSKenneth D. Merry 
193889e0f4d2SKip Macy 		req = RING_GET_REQUEST(&np->rx, requeue_idx);
19393a6d1fcfSKip Macy 		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
194089e0f4d2SKip Macy 
194189e0f4d2SKip Macy 		if (!np->copying_receiver) {
194289e0f4d2SKip Macy 			gnttab_grant_foreign_transfer_ref(ref,
194323dc5621SKip Macy 			    xenbus_get_otherend_id(np->xbdev),
19443a6d1fcfSKip Macy 			    pfn);
194589e0f4d2SKip Macy 		} else {
194689e0f4d2SKip Macy 			gnttab_grant_foreign_access_ref(ref,
194723dc5621SKip Macy 			    xenbus_get_otherend_id(np->xbdev),
19483a6d1fcfSKip Macy 			    PFNTOMFN(pfn), 0);
194989e0f4d2SKip Macy 		}
195089e0f4d2SKip Macy 		req->gref = ref;
195189e0f4d2SKip Macy 		req->id   = requeue_idx;
195289e0f4d2SKip Macy 
195389e0f4d2SKip Macy 		requeue_idx++;
195489e0f4d2SKip Macy 	}
195589e0f4d2SKip Macy 
195689e0f4d2SKip Macy 	np->rx.req_prod_pvt = requeue_idx;
195789e0f4d2SKip Macy 
195889e0f4d2SKip Macy 	/* Step 3: All public and private state should now be sane.  Get
195989e0f4d2SKip Macy 	 * ready to start sending and receiving packets and give the driver
196089e0f4d2SKip Macy 	 * domain a kick because we've probably just requeued some
196189e0f4d2SKip Macy 	 * packets.
196289e0f4d2SKip Macy 	 */
196389e0f4d2SKip Macy 	netfront_carrier_on(np);
196489e0f4d2SKip Macy 	notify_remote_via_irq(np->irq);
196589e0f4d2SKip Macy 	XN_TX_LOCK(np);
196689e0f4d2SKip Macy 	xn_txeof(np);
196789e0f4d2SKip Macy 	XN_TX_UNLOCK(np);
196889e0f4d2SKip Macy 	network_alloc_rx_buffers(np);
196989e0f4d2SKip Macy 
197089e0f4d2SKip Macy 	return (0);
197189e0f4d2SKip Macy }
197289e0f4d2SKip Macy 
197389e0f4d2SKip Macy static void
197489e0f4d2SKip Macy show_device(struct netfront_info *sc)
197589e0f4d2SKip Macy {
197689e0f4d2SKip Macy #ifdef DEBUG
197789e0f4d2SKip Macy 	if (sc) {
197889e0f4d2SKip Macy 		IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
197989e0f4d2SKip Macy 			sc->xn_ifno,
198089e0f4d2SKip Macy 			be_state_name[sc->xn_backend_state],
198189e0f4d2SKip Macy 			sc->xn_user_state ? "open" : "closed",
198289e0f4d2SKip Macy 			sc->xn_evtchn,
198389e0f4d2SKip Macy 			sc->xn_irq,
198489e0f4d2SKip Macy 			sc->xn_tx_if,
198589e0f4d2SKip Macy 			sc->xn_rx_if);
198689e0f4d2SKip Macy 	} else {
198789e0f4d2SKip Macy 		IPRINTK("<vif NULL>\n");
198889e0f4d2SKip Macy 	}
198989e0f4d2SKip Macy #endif
199089e0f4d2SKip Macy }
199189e0f4d2SKip Macy 
1992*cf9c09e1SJustin T. Gibbs static int
1993*cf9c09e1SJustin T. Gibbs xn_configure_lro(struct netfront_info *np)
1994*cf9c09e1SJustin T. Gibbs {
1995*cf9c09e1SJustin T. Gibbs 	int err;
1996*cf9c09e1SJustin T. Gibbs 
1997*cf9c09e1SJustin T. Gibbs 	err = 0;
1998*cf9c09e1SJustin T. Gibbs #if __FreeBSD_version >= 700000
1999*cf9c09e1SJustin T. Gibbs 	if ((np->xn_ifp->if_capabilities & IFCAP_LRO) != 0)
2000*cf9c09e1SJustin T. Gibbs 		tcp_lro_free(&np->xn_lro);
2001*cf9c09e1SJustin T. Gibbs 	np->xn_ifp->if_capabilities &= ~IFCAP_LRO;
2002*cf9c09e1SJustin T. Gibbs 	if (xn_enable_lro) {
2003*cf9c09e1SJustin T. Gibbs 		err = tcp_lro_init(&np->xn_lro);
2004*cf9c09e1SJustin T. Gibbs 		if (err) {
2005*cf9c09e1SJustin T. Gibbs 			device_printf(np->xbdev, "LRO initialization failed\n");
2006*cf9c09e1SJustin T. Gibbs 		} else {
2007*cf9c09e1SJustin T. Gibbs 			np->xn_lro.ifp = np->xn_ifp;
2008*cf9c09e1SJustin T. Gibbs 			np->xn_ifp->if_capabilities |= IFCAP_LRO;
2009*cf9c09e1SJustin T. Gibbs 		}
2010*cf9c09e1SJustin T. Gibbs 	}
2011*cf9c09e1SJustin T. Gibbs     	np->xn_ifp->if_capenable = np->xn_ifp->if_capabilities;
2012*cf9c09e1SJustin T. Gibbs #endif
2013*cf9c09e1SJustin T. Gibbs 	return (err);
2014*cf9c09e1SJustin T. Gibbs }
2015*cf9c09e1SJustin T. Gibbs 
201689e0f4d2SKip Macy /** Create a network device.
201789e0f4d2SKip Macy  * @param dev newbus device handle for this virtual interface
201889e0f4d2SKip Macy  */
201923dc5621SKip Macy int
202023dc5621SKip Macy create_netdev(device_t dev)
202189e0f4d2SKip Macy {
202289e0f4d2SKip Macy 	int i;
202389e0f4d2SKip Macy 	struct netfront_info *np;
202489e0f4d2SKip Macy 	int err;
202589e0f4d2SKip Macy 	struct ifnet *ifp;
202689e0f4d2SKip Macy 
202723dc5621SKip Macy 	np = device_get_softc(dev);
202889e0f4d2SKip Macy 
202989e0f4d2SKip Macy 	np->xbdev         = dev;
203089e0f4d2SKip Macy 
203189e0f4d2SKip Macy 	XN_LOCK_INIT(np, xennetif);
20320e509842SJustin T. Gibbs 
20330e509842SJustin T. Gibbs 	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
20340e509842SJustin T. Gibbs 	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
20350e509842SJustin T. Gibbs 	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);
20360e509842SJustin T. Gibbs 
203789e0f4d2SKip Macy 	np->rx_target     = RX_MIN_TARGET;
203889e0f4d2SKip Macy 	np->rx_min_target = RX_MIN_TARGET;
203989e0f4d2SKip Macy 	np->rx_max_target = RX_MAX_TARGET;
204089e0f4d2SKip Macy 
204189e0f4d2SKip Macy 	/* Initialise {tx,rx}_mbufs to be a free chain containing every entry. */
204289e0f4d2SKip Macy 	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
204389e0f4d2SKip Macy 		np->tx_mbufs[i] = (void *) ((u_long) i+1);
2044ff662b5cSJustin T. Gibbs 		np->grant_tx_ref[i] = GRANT_REF_INVALID;
204589e0f4d2SKip Macy 	}
2046931eeffaSKenneth D. Merry 	np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;
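	/*
	 * Entries in tx_mbufs do double duty: a value no larger than
	 * NET_TX_RING_SIZE is a freelist link (the index of the next free
	 * slot), while anything larger is a real mbuf pointer.  This is
	 * what the (uintptr_t)m > NET_TX_RING_SIZE assertion in xn_txeof()
	 * relies on.
	 */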
2047931eeffaSKenneth D. Merry 
204889e0f4d2SKip Macy 	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
2049931eeffaSKenneth D. Merry 
205089e0f4d2SKip Macy 		np->rx_mbufs[i] = NULL;
2051ff662b5cSJustin T. Gibbs 		np->grant_rx_ref[i] = GRANT_REF_INVALID;
205289e0f4d2SKip Macy 	}
205389e0f4d2SKip Macy 	/* A grant for every tx ring slot */
2054931eeffaSKenneth D. Merry 	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2055931eeffaSKenneth D. Merry 					  &np->gref_tx_head) != 0) {
2056227ca257SKip Macy 		IPRINTK("#### netfront can't alloc tx grant refs\n");
205789e0f4d2SKip Macy 		err = ENOMEM;
205889e0f4d2SKip Macy 		goto exit;
205989e0f4d2SKip Macy 	}
206089e0f4d2SKip Macy 	/* A grant for every rx ring slot */
206189e0f4d2SKip Macy 	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
2062931eeffaSKenneth D. Merry 					  &np->gref_rx_head) != 0) {
2063227ca257SKip Macy 		WPRINTK("#### netfront can't alloc rx grant refs\n");
206489e0f4d2SKip Macy 		gnttab_free_grant_references(np->gref_tx_head);
206589e0f4d2SKip Macy 		err = ENOMEM;
206689e0f4d2SKip Macy 		goto exit;
206789e0f4d2SKip Macy 	}
206889e0f4d2SKip Macy 
206989e0f4d2SKip Macy 	err = xen_net_read_mac(dev, np->mac);
207089e0f4d2SKip Macy 	if (err) {
207123dc5621SKip Macy 		xenbus_dev_fatal(dev, err, "parsing %s/mac",
207223dc5621SKip Macy 		    xenbus_get_node(dev));
207389e0f4d2SKip Macy 		goto out;
207489e0f4d2SKip Macy 	}
207589e0f4d2SKip Macy 
207689e0f4d2SKip Macy 	/* Set up ifnet structure */
207723dc5621SKip Macy 	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
207889e0f4d2SKip Macy     	ifp->if_softc = np;
207923dc5621SKip Macy     	if_initname(ifp, "xn",  device_get_unit(dev));
20803a6d1fcfSKip Macy     	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
208189e0f4d2SKip Macy     	ifp->if_ioctl = xn_ioctl;
208289e0f4d2SKip Macy     	ifp->if_output = ether_output;
208389e0f4d2SKip Macy     	ifp->if_start = xn_start;
2084227ca257SKip Macy #ifdef notyet
2085227ca257SKip Macy     	ifp->if_watchdog = xn_watchdog;
2086227ca257SKip Macy #endif
208789e0f4d2SKip Macy     	ifp->if_init = xn_ifinit;
208889e0f4d2SKip Macy     	ifp->if_mtu = ETHERMTU;
208989e0f4d2SKip Macy     	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;
209089e0f4d2SKip Macy 
209189e0f4d2SKip Macy     	ifp->if_hwassist = XN_CSUM_FEATURES;
209289e0f4d2SKip Macy     	ifp->if_capabilities = IFCAP_HWCSUM;
209312678024SDoug Rabson #if __FreeBSD_version >= 700000
2094931eeffaSKenneth D. Merry 	ifp->if_capabilities |= IFCAP_TSO4;
209589e0f4d2SKip Macy #endif
209612678024SDoug Rabson     	ifp->if_capenable = ifp->if_capabilities;
2097*cf9c09e1SJustin T. Gibbs 	xn_configure_lro(np);
209889e0f4d2SKip Macy 
209989e0f4d2SKip Macy     	ether_ifattach(ifp, np->mac);
210089e0f4d2SKip Macy     	callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
210189e0f4d2SKip Macy 	netfront_carrier_off(np);
210289e0f4d2SKip Macy 
210389e0f4d2SKip Macy 	return (0);
210489e0f4d2SKip Macy 
210589e0f4d2SKip Macy exit:
210689e0f4d2SKip Macy 	gnttab_free_grant_references(np->gref_tx_head);
210789e0f4d2SKip Macy out:
210889e0f4d2SKip Macy 	panic("do something smart");
210989e0f4d2SKip Macy 
211089e0f4d2SKip Macy }
211189e0f4d2SKip Macy 
211289e0f4d2SKip Macy /**
211389e0f4d2SKip Macy  * Handle the change of state of the backend to Closing.  We must delete our
211489e0f4d2SKip Macy  * device-layer structures now, to ensure that writes are flushed through to
211589e0f4d2SKip Macy  * the backend.  Once this is done, we can switch to Closed in
211689e0f4d2SKip Macy  * acknowledgement.
211789e0f4d2SKip Macy  */
211889e0f4d2SKip Macy #if 0
21190e509842SJustin T. Gibbs static void
21200e509842SJustin T. Gibbs netfront_closing(device_t dev)
212189e0f4d2SKip Macy {
212289e0f4d2SKip Macy #if 0
212389e0f4d2SKip Macy 	struct netfront_info *info = dev->dev_driver_data;
212489e0f4d2SKip Macy 
212589e0f4d2SKip Macy 	DPRINTK("netfront_closing: %s removed\n", dev->nodename);
212689e0f4d2SKip Macy 
212789e0f4d2SKip Macy 	close_netdev(info);
212889e0f4d2SKip Macy #endif
212989e0f4d2SKip Macy 	xenbus_switch_state(dev, XenbusStateClosed);
213089e0f4d2SKip Macy }
213189e0f4d2SKip Macy #endif
213289e0f4d2SKip Macy 
21330e509842SJustin T. Gibbs static int
21340e509842SJustin T. Gibbs netfront_detach(device_t dev)
213589e0f4d2SKip Macy {
213623dc5621SKip Macy 	struct netfront_info *info = device_get_softc(dev);
213789e0f4d2SKip Macy 
213823dc5621SKip Macy 	DPRINTK("%s\n", xenbus_get_node(dev));
213989e0f4d2SKip Macy 
214089e0f4d2SKip Macy 	netif_free(info);
214189e0f4d2SKip Macy 
214289e0f4d2SKip Macy 	return 0;
214389e0f4d2SKip Macy }
214489e0f4d2SKip Macy 
21450e509842SJustin T. Gibbs static void
21460e509842SJustin T. Gibbs netif_free(struct netfront_info *info)
214789e0f4d2SKip Macy {
214889e0f4d2SKip Macy 	netif_disconnect_backend(info);
214989e0f4d2SKip Macy #if 0
215089e0f4d2SKip Macy 	close_netdev(info);
215189e0f4d2SKip Macy #endif
215289e0f4d2SKip Macy }
215389e0f4d2SKip Macy 
21540e509842SJustin T. Gibbs static void
21550e509842SJustin T. Gibbs netif_disconnect_backend(struct netfront_info *info)
215689e0f4d2SKip Macy {
21573a6d1fcfSKip Macy 	XN_RX_LOCK(info);
21583a6d1fcfSKip Macy 	XN_TX_LOCK(info);
21593a6d1fcfSKip Macy 	netfront_carrier_off(info);
21603a6d1fcfSKip Macy 	XN_TX_UNLOCK(info);
21613a6d1fcfSKip Macy 	XN_RX_UNLOCK(info);
21623a6d1fcfSKip Macy 
2163*cf9c09e1SJustin T. Gibbs 	free_ring(&info->tx_ring_ref, &info->tx.sring);
2164*cf9c09e1SJustin T. Gibbs 	free_ring(&info->rx_ring_ref, &info->rx.sring);
216589e0f4d2SKip Macy 
216689e0f4d2SKip Macy 	if (info->irq)
21673a6d1fcfSKip Macy 		unbind_from_irqhandler(info->irq);
21683a6d1fcfSKip Macy 
216989e0f4d2SKip Macy 	info->irq = 0;
217089e0f4d2SKip Macy }
217189e0f4d2SKip Macy 
21720e509842SJustin T. Gibbs static void
2173*cf9c09e1SJustin T. Gibbs free_ring(int *ref, void *ring_ptr_ref)
217489e0f4d2SKip Macy {
2175*cf9c09e1SJustin T. Gibbs 	void **ring_ptr_ptr = ring_ptr_ref;
2176*cf9c09e1SJustin T. Gibbs 
2177*cf9c09e1SJustin T. Gibbs 	if (*ref != GRANT_REF_INVALID) {
2178*cf9c09e1SJustin T. Gibbs 		/* This API frees the associated storage. */
2179*cf9c09e1SJustin T. Gibbs 		gnttab_end_foreign_access(*ref, *ring_ptr_ptr);
2180*cf9c09e1SJustin T. Gibbs 		*ref = GRANT_REF_INVALID;
2181*cf9c09e1SJustin T. Gibbs 	}
2182*cf9c09e1SJustin T. Gibbs 	*ring_ptr_ptr = NULL;
218389e0f4d2SKip Macy }
218489e0f4d2SKip Macy 
21850e509842SJustin T. Gibbs static int
21860e509842SJustin T. Gibbs xn_ifmedia_upd(struct ifnet *ifp)
21870e509842SJustin T. Gibbs {
21880e509842SJustin T. Gibbs 	return (0);
21890e509842SJustin T. Gibbs }
21900e509842SJustin T. Gibbs 
21910e509842SJustin T. Gibbs static void
21920e509842SJustin T. Gibbs xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
21930e509842SJustin T. Gibbs {
21940e509842SJustin T. Gibbs 	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
21950e509842SJustin T. Gibbs 	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
21960e509842SJustin T. Gibbs }
21970e509842SJustin T. Gibbs 
219889e0f4d2SKip Macy /* ** Driver registration ** */
219923dc5621SKip Macy static device_method_t netfront_methods[] = {
220023dc5621SKip Macy 	/* Device interface */
220123dc5621SKip Macy 	DEVMETHOD(device_probe,         netfront_probe),
220223dc5621SKip Macy 	DEVMETHOD(device_attach,        netfront_attach),
220323dc5621SKip Macy 	DEVMETHOD(device_detach,        netfront_detach),
220423dc5621SKip Macy 	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
2205*cf9c09e1SJustin T. Gibbs 	DEVMETHOD(device_suspend,       netfront_suspend),
220623dc5621SKip Macy 	DEVMETHOD(device_resume,        netfront_resume),
220789e0f4d2SKip Macy 
220823dc5621SKip Macy 	/* Xenbus interface */
2209ff662b5cSJustin T. Gibbs 	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),
221089e0f4d2SKip Macy 
221123dc5621SKip Macy 	{ 0, 0 }
221289e0f4d2SKip Macy };
221389e0f4d2SKip Macy 
221423dc5621SKip Macy static driver_t netfront_driver = {
221523dc5621SKip Macy 	"xn",
221623dc5621SKip Macy 	netfront_methods,
221723dc5621SKip Macy 	sizeof(struct netfront_info),
221889e0f4d2SKip Macy };
221923dc5621SKip Macy devclass_t netfront_devclass;
222089e0f4d2SKip Macy 
2221ff662b5cSJustin T. Gibbs DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, 0, 0);
2222