xref: /freebsd/sys/dev/xen/netfront/netfront.c (revision 931eeffaa219f744c5354d7b0ebfe17cf6440b2e)
18e0ad55aSJoel Dahl /*-
289e0f4d2SKip Macy  * Copyright (c) 2004-2006 Kip Macy
389e0f4d2SKip Macy  * All rights reserved.
489e0f4d2SKip Macy  *
58e0ad55aSJoel Dahl  * Redistribution and use in source and binary forms, with or without
68e0ad55aSJoel Dahl  * modification, are permitted provided that the following conditions
78e0ad55aSJoel Dahl  * are met:
88e0ad55aSJoel Dahl  * 1. Redistributions of source code must retain the above copyright
98e0ad55aSJoel Dahl  *    notice, this list of conditions and the following disclaimer.
108e0ad55aSJoel Dahl  * 2. Redistributions in binary form must reproduce the above copyright
118e0ad55aSJoel Dahl  *    notice, this list of conditions and the following disclaimer in the
128e0ad55aSJoel Dahl  *    documentation and/or other materials provided with the distribution.
1389e0f4d2SKip Macy  *
148e0ad55aSJoel Dahl  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
158e0ad55aSJoel Dahl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
168e0ad55aSJoel Dahl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
178e0ad55aSJoel Dahl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
188e0ad55aSJoel Dahl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
198e0ad55aSJoel Dahl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
208e0ad55aSJoel Dahl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
218e0ad55aSJoel Dahl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
228e0ad55aSJoel Dahl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
238e0ad55aSJoel Dahl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
248e0ad55aSJoel Dahl  * SUCH DAMAGE.
2589e0f4d2SKip Macy  */
2689e0f4d2SKip Macy 
2789e0f4d2SKip Macy 
2889e0f4d2SKip Macy #include <sys/cdefs.h>
2989e0f4d2SKip Macy __FBSDID("$FreeBSD$");
3089e0f4d2SKip Macy 
3189e0f4d2SKip Macy #include <sys/param.h>
3289e0f4d2SKip Macy #include <sys/systm.h>
3389e0f4d2SKip Macy #include <sys/sockio.h>
3489e0f4d2SKip Macy #include <sys/mbuf.h>
3589e0f4d2SKip Macy #include <sys/malloc.h>
3623dc5621SKip Macy #include <sys/module.h>
3789e0f4d2SKip Macy #include <sys/kernel.h>
3889e0f4d2SKip Macy #include <sys/socket.h>
3912678024SDoug Rabson #include <sys/sysctl.h>
4089e0f4d2SKip Macy #include <sys/queue.h>
418cb07992SAdrian Chadd #include <sys/lock.h>
4289e0f4d2SKip Macy #include <sys/sx.h>
4389e0f4d2SKip Macy 
4489e0f4d2SKip Macy #include <net/if.h>
4589e0f4d2SKip Macy #include <net/if_arp.h>
4689e0f4d2SKip Macy #include <net/ethernet.h>
4789e0f4d2SKip Macy #include <net/if_dl.h>
4889e0f4d2SKip Macy #include <net/if_media.h>
4989e0f4d2SKip Macy 
5089e0f4d2SKip Macy #include <net/bpf.h>
5189e0f4d2SKip Macy 
5289e0f4d2SKip Macy #include <net/if_types.h>
5389e0f4d2SKip Macy #include <net/if.h>
5489e0f4d2SKip Macy 
5589e0f4d2SKip Macy #include <netinet/in_systm.h>
5689e0f4d2SKip Macy #include <netinet/in.h>
5789e0f4d2SKip Macy #include <netinet/ip.h>
5889e0f4d2SKip Macy #include <netinet/if_ether.h>
5912678024SDoug Rabson #if __FreeBSD_version >= 700000
6012678024SDoug Rabson #include <netinet/tcp.h>
6112678024SDoug Rabson #include <netinet/tcp_lro.h>
6212678024SDoug Rabson #endif
6389e0f4d2SKip Macy 
6489e0f4d2SKip Macy #include <vm/vm.h>
6589e0f4d2SKip Macy #include <vm/pmap.h>
6689e0f4d2SKip Macy 
6789e0f4d2SKip Macy #include <machine/clock.h>      /* for DELAY */
6889e0f4d2SKip Macy #include <machine/bus.h>
6989e0f4d2SKip Macy #include <machine/resource.h>
7089e0f4d2SKip Macy #include <machine/frame.h>
71980c7178SKip Macy #include <machine/vmparam.h>
7289e0f4d2SKip Macy 
7389e0f4d2SKip Macy #include <sys/bus.h>
7489e0f4d2SKip Macy #include <sys/rman.h>
7589e0f4d2SKip Macy 
7689e0f4d2SKip Macy #include <machine/intr_machdep.h>
7789e0f4d2SKip Macy 
7889e0f4d2SKip Macy #include <machine/xen/xen-os.h>
7912678024SDoug Rabson #include <machine/xen/xenfunc.h>
803a6d1fcfSKip Macy #include <xen/hypervisor.h>
813a6d1fcfSKip Macy #include <xen/xen_intr.h>
823a6d1fcfSKip Macy #include <xen/evtchn.h>
8389e0f4d2SKip Macy #include <xen/gnttab.h>
8489e0f4d2SKip Macy #include <xen/interface/memory.h>
8589e0f4d2SKip Macy #include <xen/interface/io/netif.h>
8623dc5621SKip Macy #include <xen/xenbus/xenbusvar.h>
8789e0f4d2SKip Macy 
8812678024SDoug Rabson #include <dev/xen/netfront/mbufq.h>
8912678024SDoug Rabson 
9023dc5621SKip Macy #include "xenbus_if.h"
9189e0f4d2SKip Macy 
92*931eeffaSKenneth D. Merry #define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP | CSUM_TSO)
9312678024SDoug Rabson 
9489e0f4d2SKip Macy #define GRANT_INVALID_REF	0
9589e0f4d2SKip Macy 
9689e0f4d2SKip Macy #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
9789e0f4d2SKip Macy #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
9889e0f4d2SKip Macy 
9912678024SDoug Rabson #if __FreeBSD_version >= 700000
10012678024SDoug Rabson /*
10112678024SDoug Rabson  * Should the driver do LRO on the RX end?
10212678024SDoug Rabson  *  This can be toggled on the fly, but the
10312678024SDoug Rabson  *  interface must be reset (down/up) for it
10412678024SDoug Rabson  *  to take effect.
10512678024SDoug Rabson  */
10612678024SDoug Rabson static int xn_enable_lro = 1;
10712678024SDoug Rabson TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
10812678024SDoug Rabson #else
10912678024SDoug Rabson 
11012678024SDoug Rabson #define IFCAP_TSO4	0
11112678024SDoug Rabson #define CSUM_TSO	0
11212678024SDoug Rabson 
11312678024SDoug Rabson #endif
11412678024SDoug Rabson 
11589e0f4d2SKip Macy #ifdef CONFIG_XEN
11689e0f4d2SKip Macy static int MODPARM_rx_copy = 0;
11789e0f4d2SKip Macy module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
11889e0f4d2SKip Macy MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
11989e0f4d2SKip Macy static int MODPARM_rx_flip = 0;
12089e0f4d2SKip Macy module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
12189e0f4d2SKip Macy MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
12289e0f4d2SKip Macy #else
12389e0f4d2SKip Macy static const int MODPARM_rx_copy = 1;
12489e0f4d2SKip Macy static const int MODPARM_rx_flip = 0;
12589e0f4d2SKip Macy #endif
12689e0f4d2SKip Macy 
127*931eeffaSKenneth D. Merry /**
128*931eeffaSKenneth D. Merry  * \brief The maximum allowed data fragments in a single transmit
129*931eeffaSKenneth D. Merry  *        request.
130*931eeffaSKenneth D. Merry  *
131*931eeffaSKenneth D. Merry  * This limit is imposed by the backend driver.  We assume here that
132*931eeffaSKenneth D. Merry  * we are dealing with a Linux driver domain and have set our limit
133*931eeffaSKenneth D. Merry  * to mirror the Linux MAX_SKB_FRAGS constant.
134*931eeffaSKenneth D. Merry  */
135*931eeffaSKenneth D. Merry #define	MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
136*931eeffaSKenneth D. Merry 
13789e0f4d2SKip Macy #define RX_COPY_THRESHOLD 256
13889e0f4d2SKip Macy 
13989e0f4d2SKip Macy #define net_ratelimit() 0
14089e0f4d2SKip Macy 
14189e0f4d2SKip Macy struct netfront_info;
14289e0f4d2SKip Macy struct netfront_rx_info;
14389e0f4d2SKip Macy 
14489e0f4d2SKip Macy static void xn_txeof(struct netfront_info *);
14589e0f4d2SKip Macy static void xn_rxeof(struct netfront_info *);
14689e0f4d2SKip Macy static void network_alloc_rx_buffers(struct netfront_info *);
14789e0f4d2SKip Macy 
14889e0f4d2SKip Macy static void xn_tick_locked(struct netfront_info *);
14989e0f4d2SKip Macy static void xn_tick(void *);
15089e0f4d2SKip Macy 
15189e0f4d2SKip Macy static void xn_intr(void *);
152*931eeffaSKenneth D. Merry static inline int xn_count_frags(struct mbuf *m);
153*931eeffaSKenneth D. Merry static int  xn_assemble_tx_request(struct netfront_info *sc,
154*931eeffaSKenneth D. Merry 				   struct mbuf *m_head);
15589e0f4d2SKip Macy static void xn_start_locked(struct ifnet *);
15689e0f4d2SKip Macy static void xn_start(struct ifnet *);
15789e0f4d2SKip Macy static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
15889e0f4d2SKip Macy static void xn_ifinit_locked(struct netfront_info *);
15989e0f4d2SKip Macy static void xn_ifinit(void *);
16089e0f4d2SKip Macy static void xn_stop(struct netfront_info *);
16189e0f4d2SKip Macy #ifdef notyet
16289e0f4d2SKip Macy static void xn_watchdog(struct ifnet *);
16389e0f4d2SKip Macy #endif
16489e0f4d2SKip Macy 
16589e0f4d2SKip Macy static void show_device(struct netfront_info *sc);
16689e0f4d2SKip Macy #ifdef notyet
16723dc5621SKip Macy static void netfront_closing(device_t dev);
16889e0f4d2SKip Macy #endif
16989e0f4d2SKip Macy static void netif_free(struct netfront_info *info);
17023dc5621SKip Macy static int netfront_detach(device_t dev);
17189e0f4d2SKip Macy 
17223dc5621SKip Macy static int talk_to_backend(device_t dev, struct netfront_info *info);
17323dc5621SKip Macy static int create_netdev(device_t dev);
17489e0f4d2SKip Macy static void netif_disconnect_backend(struct netfront_info *info);
17523dc5621SKip Macy static int setup_device(device_t dev, struct netfront_info *info);
17689e0f4d2SKip Macy static void end_access(int ref, void *page);
17789e0f4d2SKip Macy 
1780e509842SJustin T. Gibbs static int  xn_ifmedia_upd(struct ifnet *ifp);
1790e509842SJustin T. Gibbs static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
1800e509842SJustin T. Gibbs 
18189e0f4d2SKip Macy /* Xenolinux helper functions */
18223dc5621SKip Macy int network_connect(struct netfront_info *);
18389e0f4d2SKip Macy 
18489e0f4d2SKip Macy static void xn_free_rx_ring(struct netfront_info *);
18589e0f4d2SKip Macy 
18689e0f4d2SKip Macy static void xn_free_tx_ring(struct netfront_info *);
18789e0f4d2SKip Macy 
18889e0f4d2SKip Macy static int xennet_get_responses(struct netfront_info *np,
189*931eeffaSKenneth D. Merry 	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
190*931eeffaSKenneth D. Merry 	struct mbuf **list, int *pages_flipped_p);
19189e0f4d2SKip Macy 
19289e0f4d2SKip Macy #define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
19389e0f4d2SKip Macy 
19489e0f4d2SKip Macy #define INVALID_P2M_ENTRY (~0UL)
19589e0f4d2SKip Macy 
19689e0f4d2SKip Macy /*
19789e0f4d2SKip Macy  * Mbuf pointers. We need these to keep track of the virtual addresses
19889e0f4d2SKip Macy  * of our mbuf chains since we can only convert from virtual to physical,
19989e0f4d2SKip Macy  * not the other way around.  The size must track the free index arrays.
20089e0f4d2SKip Macy  */
20189e0f4d2SKip Macy struct xn_chain_data {
20289e0f4d2SKip Macy 	struct mbuf    *xn_tx_chain[NET_TX_RING_SIZE+1];
203a4ec37f5SAdrian Chadd 	int		xn_tx_chain_cnt;
20489e0f4d2SKip Macy 	struct mbuf    *xn_rx_chain[NET_RX_RING_SIZE+1];
20589e0f4d2SKip Macy };
20689e0f4d2SKip Macy 
207*931eeffaSKenneth D. Merry #define NUM_ELEMENTS(x) (sizeof(x)/sizeof(*x))
20889e0f4d2SKip Macy 
20989e0f4d2SKip Macy struct net_device_stats
21089e0f4d2SKip Macy {
21189e0f4d2SKip Macy 	u_long	rx_packets;		/* total packets received	*/
21289e0f4d2SKip Macy 	u_long	tx_packets;		/* total packets transmitted	*/
21389e0f4d2SKip Macy 	u_long	rx_bytes;		/* total bytes received 	*/
21489e0f4d2SKip Macy 	u_long	tx_bytes;		/* total bytes transmitted	*/
21589e0f4d2SKip Macy 	u_long	rx_errors;		/* bad packets received		*/
21689e0f4d2SKip Macy 	u_long	tx_errors;		/* packet transmit problems	*/
21789e0f4d2SKip Macy 	u_long	rx_dropped;		/* no space in linux buffers	*/
21889e0f4d2SKip Macy 	u_long	tx_dropped;		/* no space available in linux	*/
21989e0f4d2SKip Macy 	u_long	multicast;		/* multicast packets received	*/
22089e0f4d2SKip Macy 	u_long	collisions;
22189e0f4d2SKip Macy 
22289e0f4d2SKip Macy 	/* detailed rx_errors: */
22389e0f4d2SKip Macy 	u_long	rx_length_errors;
22489e0f4d2SKip Macy 	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
22589e0f4d2SKip Macy 	u_long	rx_crc_errors;		/* recved pkt with crc error	*/
22689e0f4d2SKip Macy 	u_long	rx_frame_errors;	/* recv'd frame alignment error */
22789e0f4d2SKip Macy 	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
22889e0f4d2SKip Macy 	u_long	rx_missed_errors;	/* receiver missed packet	*/
22989e0f4d2SKip Macy 
23089e0f4d2SKip Macy 	/* detailed tx_errors */
23189e0f4d2SKip Macy 	u_long	tx_aborted_errors;
23289e0f4d2SKip Macy 	u_long	tx_carrier_errors;
23389e0f4d2SKip Macy 	u_long	tx_fifo_errors;
23489e0f4d2SKip Macy 	u_long	tx_heartbeat_errors;
23589e0f4d2SKip Macy 	u_long	tx_window_errors;
23689e0f4d2SKip Macy 
23789e0f4d2SKip Macy 	/* for cslip etc */
23889e0f4d2SKip Macy 	u_long	rx_compressed;
23989e0f4d2SKip Macy 	u_long	tx_compressed;
24089e0f4d2SKip Macy };
24189e0f4d2SKip Macy 
24289e0f4d2SKip Macy struct netfront_info {
24389e0f4d2SKip Macy 
24489e0f4d2SKip Macy 	struct ifnet *xn_ifp;
24512678024SDoug Rabson #if __FreeBSD_version >= 700000
24612678024SDoug Rabson 	struct lro_ctrl xn_lro;
24712678024SDoug Rabson #endif
24889e0f4d2SKip Macy 
24989e0f4d2SKip Macy 	struct net_device_stats stats;
25089e0f4d2SKip Macy 	u_int tx_full;
25189e0f4d2SKip Macy 
25289e0f4d2SKip Macy 	netif_tx_front_ring_t tx;
25389e0f4d2SKip Macy 	netif_rx_front_ring_t rx;
25489e0f4d2SKip Macy 
25589e0f4d2SKip Macy 	struct mtx   tx_lock;
25689e0f4d2SKip Macy 	struct mtx   rx_lock;
257227ca257SKip Macy 	struct mtx   sc_lock;
25889e0f4d2SKip Macy 
25989e0f4d2SKip Macy 	u_int handle;
26089e0f4d2SKip Macy 	u_int irq;
26189e0f4d2SKip Macy 	u_int copying_receiver;
26289e0f4d2SKip Macy 	u_int carrier;
26389e0f4d2SKip Macy 
26489e0f4d2SKip Macy 	/* Receive-ring batched refills. */
26589e0f4d2SKip Macy #define RX_MIN_TARGET 32
26689e0f4d2SKip Macy #define RX_MAX_TARGET NET_RX_RING_SIZE
2670e509842SJustin T. Gibbs 	int rx_min_target;
2680e509842SJustin T. Gibbs 	int rx_max_target;
2690e509842SJustin T. Gibbs 	int rx_target;
27089e0f4d2SKip Macy 
27189e0f4d2SKip Macy 	grant_ref_t gref_tx_head;
27289e0f4d2SKip Macy 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
27389e0f4d2SKip Macy 	grant_ref_t gref_rx_head;
27489e0f4d2SKip Macy 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];
27589e0f4d2SKip Macy 
27623dc5621SKip Macy 	device_t		xbdev;
27789e0f4d2SKip Macy 	int			tx_ring_ref;
27889e0f4d2SKip Macy 	int			rx_ring_ref;
27989e0f4d2SKip Macy 	uint8_t			mac[ETHER_ADDR_LEN];
28089e0f4d2SKip Macy 	struct xn_chain_data	xn_cdata;	/* mbufs */
28189e0f4d2SKip Macy 	struct mbuf_head	xn_rx_batch;	/* head of the batch queue */
28289e0f4d2SKip Macy 
28389e0f4d2SKip Macy 	int			xn_if_flags;
28489e0f4d2SKip Macy 	struct callout	        xn_stat_ch;
28589e0f4d2SKip Macy 
28689e0f4d2SKip Macy 	u_long			rx_pfn_array[NET_RX_RING_SIZE];
28789e0f4d2SKip Macy 	multicall_entry_t	rx_mcl[NET_RX_RING_SIZE+1];
28889e0f4d2SKip Macy 	mmu_update_t		rx_mmu[NET_RX_RING_SIZE];
2890e509842SJustin T. Gibbs 	struct ifmedia		sc_media;
29089e0f4d2SKip Macy };
29189e0f4d2SKip Macy 
29289e0f4d2SKip Macy #define rx_mbufs xn_cdata.xn_rx_chain
29389e0f4d2SKip Macy #define tx_mbufs xn_cdata.xn_tx_chain
29489e0f4d2SKip Macy 
29589e0f4d2SKip Macy #define XN_LOCK_INIT(_sc, _name) \
29689e0f4d2SKip Macy         mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
29789e0f4d2SKip Macy         mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
298227ca257SKip Macy         mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF)
29989e0f4d2SKip Macy 
30089e0f4d2SKip Macy #define XN_RX_LOCK(_sc)           mtx_lock(&(_sc)->rx_lock)
30189e0f4d2SKip Macy #define XN_RX_UNLOCK(_sc)         mtx_unlock(&(_sc)->rx_lock)
30289e0f4d2SKip Macy 
30389e0f4d2SKip Macy #define XN_TX_LOCK(_sc)           mtx_lock(&(_sc)->tx_lock)
30489e0f4d2SKip Macy #define XN_TX_UNLOCK(_sc)         mtx_unlock(&(_sc)->tx_lock)
30589e0f4d2SKip Macy 
306227ca257SKip Macy #define XN_LOCK(_sc)           mtx_lock(&(_sc)->sc_lock);
307227ca257SKip Macy #define XN_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_lock);
30889e0f4d2SKip Macy 
309227ca257SKip Macy #define XN_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->sc_lock, MA_OWNED);
31089e0f4d2SKip Macy #define XN_RX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->rx_lock, MA_OWNED);
31189e0f4d2SKip Macy #define XN_TX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->tx_lock, MA_OWNED);
31289e0f4d2SKip Macy #define XN_LOCK_DESTROY(_sc)   mtx_destroy(&(_sc)->rx_lock); \
31389e0f4d2SKip Macy                                mtx_destroy(&(_sc)->tx_lock); \
314227ca257SKip Macy                                mtx_destroy(&(_sc)->sc_lock);
31589e0f4d2SKip Macy 
31689e0f4d2SKip Macy struct netfront_rx_info {
31789e0f4d2SKip Macy 	struct netif_rx_response rx;
31889e0f4d2SKip Macy 	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
31989e0f4d2SKip Macy };
32089e0f4d2SKip Macy 
32189e0f4d2SKip Macy #define netfront_carrier_on(netif)	((netif)->carrier = 1)
32289e0f4d2SKip Macy #define netfront_carrier_off(netif)	((netif)->carrier = 0)
32389e0f4d2SKip Macy #define netfront_carrier_ok(netif)	((netif)->carrier)
32489e0f4d2SKip Macy 
32589e0f4d2SKip Macy /*
32689e0f4d2SKip Macy  * Access functions for acquiring/freeing slots in tx_mbufs[].
32789e0f4d2SKip Macy  *
32889e0f4d2SKip Macy  * The free list is threaded through the unused entries of the array:
32989e0f4d2SKip Macy  * entry 0 is the list head, and each free slot stores the index of the
33089e0f4d2SKip Macy  * next free slot, cast to a struct mbuf pointer.
33189e0f4d2SKip Macy  */
33289e0f4d2SKip Macy 
33389e0f4d2SKip Macy static inline void
334*931eeffaSKenneth D. Merry add_id_to_freelist(struct mbuf **list, uintptr_t id)
33589e0f4d2SKip Macy {
336*931eeffaSKenneth D. Merry 	KASSERT(id != 0,
337*931eeffaSKenneth D. Merry 		("%s: the head item (0) must always be free.", __func__));
33889e0f4d2SKip Macy 	list[id] = list[0];
339*931eeffaSKenneth D. Merry 	list[0]  = (struct mbuf *)id;
34089e0f4d2SKip Macy }
34189e0f4d2SKip Macy 
34289e0f4d2SKip Macy static inline unsigned short
34389e0f4d2SKip Macy get_id_from_freelist(struct mbuf **list)
34489e0f4d2SKip Macy {
345*931eeffaSKenneth D. Merry 	uintptr_t id;
346*931eeffaSKenneth D. Merry 
347*931eeffaSKenneth D. Merry 	id = (uintptr_t)list[0];
348*931eeffaSKenneth D. Merry 	KASSERT(id != 0,
349*931eeffaSKenneth D. Merry 		("%s: the head item (0) must always remain free.", __func__));
35089e0f4d2SKip Macy 	list[0] = list[id];
35189e0f4d2SKip Macy 	return (id);
35289e0f4d2SKip Macy }
35389e0f4d2SKip Macy 
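/* Convert a ring index into an index into the rx mbuf and grant-ref arrays. */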
35489e0f4d2SKip Macy static inline int
35589e0f4d2SKip Macy xennet_rxidx(RING_IDX idx)
35689e0f4d2SKip Macy {
35789e0f4d2SKip Macy 	return idx & (NET_RX_RING_SIZE - 1);
35889e0f4d2SKip Macy }
35989e0f4d2SKip Macy 
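/* Detach and return the mbuf that was posted at rx ring index 'ri'. */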
36089e0f4d2SKip Macy static inline struct mbuf *
361*931eeffaSKenneth D. Merry xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
36289e0f4d2SKip Macy {
36389e0f4d2SKip Macy 	int i = xennet_rxidx(ri);
36489e0f4d2SKip Macy 	struct mbuf *m;
36589e0f4d2SKip Macy 
36689e0f4d2SKip Macy 	m = np->rx_mbufs[i];
36789e0f4d2SKip Macy 	np->rx_mbufs[i] = NULL;
36889e0f4d2SKip Macy 	return (m);
36989e0f4d2SKip Macy }
37089e0f4d2SKip Macy 
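/* Detach and return the grant reference that was posted at rx ring index 'ri'. */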
37189e0f4d2SKip Macy static inline grant_ref_t
37289e0f4d2SKip Macy xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
37389e0f4d2SKip Macy {
37489e0f4d2SKip Macy 	int i = xennet_rxidx(ri);
37589e0f4d2SKip Macy 	grant_ref_t ref = np->grant_rx_ref[i];
37689e0f4d2SKip Macy 	np->grant_rx_ref[i] = GRANT_INVALID_REF;
37789e0f4d2SKip Macy 	return ref;
37889e0f4d2SKip Macy }
37989e0f4d2SKip Macy 
38089e0f4d2SKip Macy #define IPRINTK(fmt, args...) \
38189e0f4d2SKip Macy     printf("[XEN] " fmt, ##args)
382227ca257SKip Macy #ifdef INVARIANTS
38389e0f4d2SKip Macy #define WPRINTK(fmt, args...) \
38489e0f4d2SKip Macy     printf("[XEN] " fmt, ##args)
385227ca257SKip Macy #else
386227ca257SKip Macy #define WPRINTK(fmt, args...)
387227ca257SKip Macy #endif
388227ca257SKip Macy #ifdef DEBUG
38989e0f4d2SKip Macy #define DPRINTK(fmt, args...) \
39023dc5621SKip Macy     printf("[XEN] %s: " fmt, __func__, ##args)
39112678024SDoug Rabson #else
39212678024SDoug Rabson #define DPRINTK(fmt, args...)
39312678024SDoug Rabson #endif
39489e0f4d2SKip Macy 
39589e0f4d2SKip Macy /**
39689e0f4d2SKip Macy  * Read the 'mac' node at the given device's node in the store, and parse that
39789e0f4d2SKip Macy  * as colon-separated octets, placing the result in the given mac array.  mac
39889e0f4d2SKip Macy  * must be a preallocated array of length ETHER_ADDR_LEN.
39989e0f4d2SKip Macy  * Return 0 on success, or errno on error.
40089e0f4d2SKip Macy  */
40189e0f4d2SKip Macy static int
40223dc5621SKip Macy xen_net_read_mac(device_t dev, uint8_t mac[])
40389e0f4d2SKip Macy {
4043a6d1fcfSKip Macy 	int error, i;
4053a6d1fcfSKip Macy 	char *s, *e, *macstr;
4063a6d1fcfSKip Macy 
4073a6d1fcfSKip Macy 	error = xenbus_read(XBT_NIL, xenbus_get_node(dev), "mac", NULL,
4083a6d1fcfSKip Macy 	    (void **) &macstr);
4093a6d1fcfSKip Macy 	if (error)
4103a6d1fcfSKip Macy 		return (error);
4113a6d1fcfSKip Macy 
41289e0f4d2SKip Macy 	s = macstr;
41389e0f4d2SKip Macy 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
41489e0f4d2SKip Macy 		mac[i] = strtoul(s, &e, 16);
41589e0f4d2SKip Macy 		if (s == e || (e[0] != ':' && e[0] != 0)) {
41689e0f4d2SKip Macy 			free(macstr, M_DEVBUF);
4173a6d1fcfSKip Macy 			return (ENOENT);
41889e0f4d2SKip Macy 		}
41989e0f4d2SKip Macy 		s = &e[1];
42089e0f4d2SKip Macy 	}
42189e0f4d2SKip Macy 	free(macstr, M_DEVBUF);
4223a6d1fcfSKip Macy 	return (0);
42389e0f4d2SKip Macy }
42489e0f4d2SKip Macy 
42589e0f4d2SKip Macy /**
42689e0f4d2SKip Macy  * Entry point to this code when a new device is created.  On attach, allocate
42789e0f4d2SKip Macy  * the basic structures and the ring buffers for communication with the backend, and
42889e0f4d2SKip Macy  * inform the backend of the appropriate details for those.  Switch to
42989e0f4d2SKip Macy  * Connected state.
43089e0f4d2SKip Macy  */
43189e0f4d2SKip Macy static int
43223dc5621SKip Macy netfront_probe(device_t dev)
43323dc5621SKip Macy {
43423dc5621SKip Macy 
43523dc5621SKip Macy 	if (!strcmp(xenbus_get_type(dev), "vif")) {
43623dc5621SKip Macy 		device_set_desc(dev, "Virtual Network Interface");
43723dc5621SKip Macy 		return (0);
43823dc5621SKip Macy 	}
43923dc5621SKip Macy 
44023dc5621SKip Macy 	return (ENXIO);
44123dc5621SKip Macy }
44223dc5621SKip Macy 
44323dc5621SKip Macy static int
44423dc5621SKip Macy netfront_attach(device_t dev)
44589e0f4d2SKip Macy {
44689e0f4d2SKip Macy 	int err;
44789e0f4d2SKip Macy 
44823dc5621SKip Macy 	err = create_netdev(dev);
44989e0f4d2SKip Macy 	if (err) {
45089e0f4d2SKip Macy 		xenbus_dev_fatal(dev, err, "creating netdev");
45189e0f4d2SKip Macy 		return err;
45289e0f4d2SKip Macy 	}
45389e0f4d2SKip Macy 
45412678024SDoug Rabson #if __FreeBSD_version >= 700000
45512678024SDoug Rabson 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
45612678024SDoug Rabson 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
45712678024SDoug Rabson 	    OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
45812678024SDoug Rabson 	    &xn_enable_lro, 0, "Large Receive Offload");
45912678024SDoug Rabson #endif
46012678024SDoug Rabson 
46189e0f4d2SKip Macy 	return 0;
46289e0f4d2SKip Macy }
46389e0f4d2SKip Macy 
46489e0f4d2SKip Macy 
46589e0f4d2SKip Macy /**
46689e0f4d2SKip Macy  * We are reconnecting to the backend, due to a suspend/resume, or a backend
46789e0f4d2SKip Macy  * driver restart.  We tear down our netif structure and recreate it, but
46889e0f4d2SKip Macy  * leave the device-layer structures intact so that this is transparent to the
46989e0f4d2SKip Macy  * rest of the kernel.
47089e0f4d2SKip Macy  */
47189e0f4d2SKip Macy static int
47223dc5621SKip Macy netfront_resume(device_t dev)
47389e0f4d2SKip Macy {
47423dc5621SKip Macy 	struct netfront_info *info = device_get_softc(dev);
47589e0f4d2SKip Macy 
47689e0f4d2SKip Macy 	netif_disconnect_backend(info);
47789e0f4d2SKip Macy 	return (0);
47889e0f4d2SKip Macy }
47989e0f4d2SKip Macy 
48089e0f4d2SKip Macy 
48189e0f4d2SKip Macy /* Common code used when first setting up, and when resuming. */
48289e0f4d2SKip Macy static int
48323dc5621SKip Macy talk_to_backend(device_t dev, struct netfront_info *info)
48489e0f4d2SKip Macy {
48589e0f4d2SKip Macy 	const char *message;
48689e0f4d2SKip Macy 	struct xenbus_transaction xbt;
48723dc5621SKip Macy 	const char *node = xenbus_get_node(dev);
48889e0f4d2SKip Macy 	int err;
48989e0f4d2SKip Macy 
49089e0f4d2SKip Macy 	err = xen_net_read_mac(dev, info->mac);
49189e0f4d2SKip Macy 	if (err) {
49223dc5621SKip Macy 		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
49389e0f4d2SKip Macy 		goto out;
49489e0f4d2SKip Macy 	}
49589e0f4d2SKip Macy 
49689e0f4d2SKip Macy 	/* Create shared ring, alloc event channel. */
49789e0f4d2SKip Macy 	err = setup_device(dev, info);
49889e0f4d2SKip Macy 	if (err)
49989e0f4d2SKip Macy 		goto out;
50089e0f4d2SKip Macy 
50189e0f4d2SKip Macy  again:
50289e0f4d2SKip Macy 	err = xenbus_transaction_start(&xbt);
50389e0f4d2SKip Macy 	if (err) {
50489e0f4d2SKip Macy 		xenbus_dev_fatal(dev, err, "starting transaction");
50589e0f4d2SKip Macy 		goto destroy_ring;
50689e0f4d2SKip Macy 	}
50723dc5621SKip Macy 	err = xenbus_printf(xbt, node, "tx-ring-ref","%u",
50889e0f4d2SKip Macy 			info->tx_ring_ref);
50989e0f4d2SKip Macy 	if (err) {
51089e0f4d2SKip Macy 		message = "writing tx ring-ref";
51189e0f4d2SKip Macy 		goto abort_transaction;
51289e0f4d2SKip Macy 	}
51323dc5621SKip Macy 	err = xenbus_printf(xbt, node, "rx-ring-ref","%u",
51489e0f4d2SKip Macy 			info->rx_ring_ref);
51589e0f4d2SKip Macy 	if (err) {
51689e0f4d2SKip Macy 		message = "writing rx ring-ref";
51789e0f4d2SKip Macy 		goto abort_transaction;
51889e0f4d2SKip Macy 	}
51923dc5621SKip Macy 	err = xenbus_printf(xbt, node,
52089e0f4d2SKip Macy 			"event-channel", "%u", irq_to_evtchn_port(info->irq));
52189e0f4d2SKip Macy 	if (err) {
52289e0f4d2SKip Macy 		message = "writing event-channel";
52389e0f4d2SKip Macy 		goto abort_transaction;
52489e0f4d2SKip Macy 	}
52523dc5621SKip Macy 	err = xenbus_printf(xbt, node, "request-rx-copy", "%u",
52689e0f4d2SKip Macy 			info->copying_receiver);
52789e0f4d2SKip Macy 	if (err) {
52889e0f4d2SKip Macy 		message = "writing request-rx-copy";
52989e0f4d2SKip Macy 		goto abort_transaction;
53089e0f4d2SKip Macy 	}
53123dc5621SKip Macy 	err = xenbus_printf(xbt, node, "feature-rx-notify", "%d", 1);
53289e0f4d2SKip Macy 	if (err) {
53389e0f4d2SKip Macy 		message = "writing feature-rx-notify";
53489e0f4d2SKip Macy 		goto abort_transaction;
53589e0f4d2SKip Macy 	}
53623dc5621SKip Macy 	err = xenbus_printf(xbt, node, "feature-sg", "%d", 1);
53789e0f4d2SKip Macy 	if (err) {
53889e0f4d2SKip Macy 		message = "writing feature-sg";
53989e0f4d2SKip Macy 		goto abort_transaction;
54089e0f4d2SKip Macy 	}
54112678024SDoug Rabson #if __FreeBSD_version >= 700000
54223dc5621SKip Macy 	err = xenbus_printf(xbt, node, "feature-gso-tcpv4", "%d", 1);
54389e0f4d2SKip Macy 	if (err) {
54489e0f4d2SKip Macy 		message = "writing feature-gso-tcpv4";
54589e0f4d2SKip Macy 		goto abort_transaction;
54689e0f4d2SKip Macy 	}
54789e0f4d2SKip Macy #endif
54889e0f4d2SKip Macy 
54989e0f4d2SKip Macy 	err = xenbus_transaction_end(xbt, 0);
55089e0f4d2SKip Macy 	if (err) {
55189e0f4d2SKip Macy 		if (err == EAGAIN)
55289e0f4d2SKip Macy 			goto again;
55389e0f4d2SKip Macy 		xenbus_dev_fatal(dev, err, "completing transaction");
55489e0f4d2SKip Macy 		goto destroy_ring;
55589e0f4d2SKip Macy 	}
55689e0f4d2SKip Macy 
55789e0f4d2SKip Macy 	return 0;
55889e0f4d2SKip Macy 
55989e0f4d2SKip Macy  abort_transaction:
56089e0f4d2SKip Macy 	xenbus_transaction_end(xbt, 1);
56189e0f4d2SKip Macy 	xenbus_dev_fatal(dev, err, "%s", message);
56289e0f4d2SKip Macy  destroy_ring:
56389e0f4d2SKip Macy 	netif_free(info);
56489e0f4d2SKip Macy  out:
56589e0f4d2SKip Macy 	return err;
56689e0f4d2SKip Macy }
56789e0f4d2SKip Macy 
56889e0f4d2SKip Macy 
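/*
 * Allocate the shared tx/rx rings, grant them to the backend, and bind
 * the event channel interrupt handler.
 */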
56989e0f4d2SKip Macy static int
57023dc5621SKip Macy setup_device(device_t dev, struct netfront_info *info)
57189e0f4d2SKip Macy {
57289e0f4d2SKip Macy 	netif_tx_sring_t *txs;
57389e0f4d2SKip Macy 	netif_rx_sring_t *rxs;
5743a6d1fcfSKip Macy 	int error;
57589e0f4d2SKip Macy 	struct ifnet *ifp;
57689e0f4d2SKip Macy 
57789e0f4d2SKip Macy 	ifp = info->xn_ifp;
57889e0f4d2SKip Macy 
57989e0f4d2SKip Macy 	info->tx_ring_ref = GRANT_INVALID_REF;
58089e0f4d2SKip Macy 	info->rx_ring_ref = GRANT_INVALID_REF;
58189e0f4d2SKip Macy 	info->rx.sring = NULL;
58289e0f4d2SKip Macy 	info->tx.sring = NULL;
58389e0f4d2SKip Macy 	info->irq = 0;
58489e0f4d2SKip Macy 
58589e0f4d2SKip Macy 	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
58689e0f4d2SKip Macy 	if (!txs) {
5873a6d1fcfSKip Macy 		error = ENOMEM;
5883a6d1fcfSKip Macy 		xenbus_dev_fatal(dev, error, "allocating tx ring page");
58989e0f4d2SKip Macy 		goto fail;
59089e0f4d2SKip Macy 	}
59189e0f4d2SKip Macy 	SHARED_RING_INIT(txs);
59289e0f4d2SKip Macy 	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
5933a6d1fcfSKip Macy 	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
5943a6d1fcfSKip Macy 	if (error)
59589e0f4d2SKip Macy 		goto fail;
59689e0f4d2SKip Macy 
59789e0f4d2SKip Macy 	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
59889e0f4d2SKip Macy 	if (!rxs) {
5993a6d1fcfSKip Macy 		error = ENOMEM;
6003a6d1fcfSKip Macy 		xenbus_dev_fatal(dev, error, "allocating rx ring page");
60189e0f4d2SKip Macy 		goto fail;
60289e0f4d2SKip Macy 	}
60389e0f4d2SKip Macy 	SHARED_RING_INIT(rxs);
60489e0f4d2SKip Macy 	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
60589e0f4d2SKip Macy 
6063a6d1fcfSKip Macy 	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
6073a6d1fcfSKip Macy 	if (error)
60889e0f4d2SKip Macy 		goto fail;
60989e0f4d2SKip Macy 
6103a6d1fcfSKip Macy 	error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev),
6113a6d1fcfSKip Macy 	    "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq);
61289e0f4d2SKip Macy 
6133a6d1fcfSKip Macy 	if (error) {
6143a6d1fcfSKip Macy 		xenbus_dev_fatal(dev, error,
61589e0f4d2SKip Macy 				 "bind_evtchn_to_irqhandler failed");
61589e0f4d2SKip Macy 				 "bind_listening_port_to_irqhandler failed");
61789e0f4d2SKip Macy 	}
61889e0f4d2SKip Macy 
61989e0f4d2SKip Macy 	show_device(info);
62089e0f4d2SKip Macy 
6213a6d1fcfSKip Macy 	return (0);
62289e0f4d2SKip Macy 
62389e0f4d2SKip Macy  fail:
62489e0f4d2SKip Macy 	netif_free(info);
6253a6d1fcfSKip Macy 	return (error);
62689e0f4d2SKip Macy }
62789e0f4d2SKip Macy 
62889e0f4d2SKip Macy /**
62912678024SDoug Rabson  * If this interface has an IPv4 address, send an ARP for it.  This
63012678024SDoug Rabson  * helps to get the network going again after migrating hosts.
63112678024SDoug Rabson  */
63212678024SDoug Rabson static void
63312678024SDoug Rabson netfront_send_fake_arp(device_t dev, struct netfront_info *info)
63412678024SDoug Rabson {
63512678024SDoug Rabson 	struct ifnet *ifp;
63612678024SDoug Rabson 	struct ifaddr *ifa;
63712678024SDoug Rabson 
63812678024SDoug Rabson 	ifp = info->xn_ifp;
63912678024SDoug Rabson 	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
64012678024SDoug Rabson 		if (ifa->ifa_addr->sa_family == AF_INET) {
64112678024SDoug Rabson 			arp_ifinit(ifp, ifa);
64212678024SDoug Rabson 		}
64312678024SDoug Rabson 	}
64412678024SDoug Rabson }
64512678024SDoug Rabson 
64612678024SDoug Rabson /**
64789e0f4d2SKip Macy  * Callback received when the backend's state changes.
64889e0f4d2SKip Macy  */
649cfed3783SKip Macy static int
65023dc5621SKip Macy netfront_backend_changed(device_t dev, XenbusState newstate)
65189e0f4d2SKip Macy {
65223dc5621SKip Macy 	struct netfront_info *sc = device_get_softc(dev);
65389e0f4d2SKip Macy 
65423dc5621SKip Macy 	DPRINTK("newstate=%d\n", newstate);
65589e0f4d2SKip Macy 
65623dc5621SKip Macy 	switch (newstate) {
65789e0f4d2SKip Macy 	case XenbusStateInitialising:
65889e0f4d2SKip Macy 	case XenbusStateInitialised:
65989e0f4d2SKip Macy 	case XenbusStateConnected:
66089e0f4d2SKip Macy 	case XenbusStateUnknown:
66189e0f4d2SKip Macy 	case XenbusStateClosed:
662920ba15bSKip Macy 	case XenbusStateReconfigured:
663920ba15bSKip Macy 	case XenbusStateReconfiguring:
66489e0f4d2SKip Macy 		break;
66589e0f4d2SKip Macy 	case XenbusStateInitWait:
66623dc5621SKip Macy 		if (xenbus_get_state(dev) != XenbusStateInitialising)
66789e0f4d2SKip Macy 			break;
66823dc5621SKip Macy 		if (network_connect(sc) != 0)
66989e0f4d2SKip Macy 			break;
67023dc5621SKip Macy 		xenbus_set_state(dev, XenbusStateConnected);
67112678024SDoug Rabson 		netfront_send_fake_arp(dev, sc);
67223dc5621SKip Macy 		break;
67389e0f4d2SKip Macy 	case XenbusStateClosing:
67423dc5621SKip Macy 		xenbus_set_state(dev, XenbusStateClosed);
67589e0f4d2SKip Macy 		break;
67689e0f4d2SKip Macy 	}
677cfed3783SKip Macy 	return (0);
67889e0f4d2SKip Macy }
67989e0f4d2SKip Macy 
68089e0f4d2SKip Macy static void
68189e0f4d2SKip Macy xn_free_rx_ring(struct netfront_info *sc)
68289e0f4d2SKip Macy {
68389e0f4d2SKip Macy #if 0
68489e0f4d2SKip Macy 	int i;
68589e0f4d2SKip Macy 
68689e0f4d2SKip Macy 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
687*931eeffaSKenneth D. Merry 		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
688*931eeffaSKenneth D. Merry 			m_freem(sc->rx_mbufs[i]);
689*931eeffaSKenneth D. Merry 			sc->rx_mbufs[i] = NULL;
69089e0f4d2SKip Macy 		}
69189e0f4d2SKip Macy 	}
69289e0f4d2SKip Macy 
69389e0f4d2SKip Macy 	sc->rx.rsp_cons = 0;
69489e0f4d2SKip Macy 	sc->xn_rx_if->req_prod = 0;
69589e0f4d2SKip Macy 	sc->xn_rx_if->event = sc->rx.rsp_cons ;
69689e0f4d2SKip Macy #endif
69789e0f4d2SKip Macy }
69889e0f4d2SKip Macy 
69989e0f4d2SKip Macy static void
70089e0f4d2SKip Macy xn_free_tx_ring(struct netfront_info *sc)
70189e0f4d2SKip Macy {
70289e0f4d2SKip Macy #if 0
70389e0f4d2SKip Macy 	int i;
70489e0f4d2SKip Macy 
70589e0f4d2SKip Macy 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
706*931eeffaSKenneth D. Merry 		if (sc->tx_mbufs[i] != NULL) {
707*931eeffaSKenneth D. Merry 			m_freem(sc->tx_mbufs[i]);
70889e0f4d2SKip Macy 			sc->xn_cdata.xn_tx_chain[i] = NULL;
70989e0f4d2SKip Macy 		}
71089e0f4d2SKip Macy 	}
71189e0f4d2SKip Macy 
71289e0f4d2SKip Macy 	return;
71389e0f4d2SKip Macy #endif
71489e0f4d2SKip Macy }
71589e0f4d2SKip Macy 
716*931eeffaSKenneth D. Merry /**
717*931eeffaSKenneth D. Merry  * \brief Verify that there is sufficient space in the Tx ring
718*931eeffaSKenneth D. Merry  *        buffer for a maximally sized request to be enqueued.
719c099cafaSAdrian Chadd  *
720*931eeffaSKenneth D. Merry  * A transmit request requires a transmit descriptor for each packet
721*931eeffaSKenneth D. Merry  * fragment, plus up to 2 entries for "options" (e.g. TSO).
722c099cafaSAdrian Chadd  */
72389e0f4d2SKip Macy static inline int
724*931eeffaSKenneth D. Merry xn_tx_slot_available(struct netfront_info *np)
72589e0f4d2SKip Macy {
726*931eeffaSKenneth D. Merry 	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
72789e0f4d2SKip Macy }
728*931eeffaSKenneth D. Merry 
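/*
 * Reclaim any transmit mbufs that are still outstanding in the ring and
 * return their grant references to the free pool.
 */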
72989e0f4d2SKip Macy static void
73089e0f4d2SKip Macy netif_release_tx_bufs(struct netfront_info *np)
73189e0f4d2SKip Macy {
73289e0f4d2SKip Macy 	int i;
73389e0f4d2SKip Macy 
73489e0f4d2SKip Macy 	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
735*931eeffaSKenneth D. Merry 		struct mbuf *m;
73689e0f4d2SKip Macy 
737*931eeffaSKenneth D. Merry 		m = np->tx_mbufs[i];
738*931eeffaSKenneth D. Merry 
739*931eeffaSKenneth D. Merry 		/*
740*931eeffaSKenneth D. Merry 		 * We assume that no kernel addresses are
741*931eeffaSKenneth D. Merry 		 * less than NET_TX_RING_SIZE.  Any entry
742*931eeffaSKenneth D. Merry 		 * in the table that is below this number
743*931eeffaSKenneth D. Merry 		 * must be an index from free-list tracking.
744*931eeffaSKenneth D. Merry 		 */
745*931eeffaSKenneth D. Merry 		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
74689e0f4d2SKip Macy 			continue;
74789e0f4d2SKip Macy 		gnttab_grant_foreign_access_ref(np->grant_tx_ref[i],
74823dc5621SKip Macy 		    xenbus_get_otherend_id(np->xbdev),
74923dc5621SKip Macy 		    virt_to_mfn(mtod(m, vm_offset_t)),
75089e0f4d2SKip Macy 		    GNTMAP_readonly);
75189e0f4d2SKip Macy 		gnttab_release_grant_reference(&np->gref_tx_head,
75289e0f4d2SKip Macy 		    np->grant_tx_ref[i]);
75389e0f4d2SKip Macy 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
75489e0f4d2SKip Macy 		add_id_to_freelist(np->tx_mbufs, i);
755a4ec37f5SAdrian Chadd 		np->xn_cdata.xn_tx_chain_cnt--;
756a4ec37f5SAdrian Chadd 		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
757a4ec37f5SAdrian Chadd 			panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0");
758a4ec37f5SAdrian Chadd 		}
75989e0f4d2SKip Macy 		m_freem(m);
76089e0f4d2SKip Macy 	}
76189e0f4d2SKip Macy }
76289e0f4d2SKip Macy 
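/*
 * Refill the receive ring with newly allocated mbufs, granting (or
 * flipping) the underlying pages to the backend, and notify it if any
 * new requests were posted.
 */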
76389e0f4d2SKip Macy static void
76489e0f4d2SKip Macy network_alloc_rx_buffers(struct netfront_info *sc)
76589e0f4d2SKip Macy {
76623dc5621SKip Macy 	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
76789e0f4d2SKip Macy 	unsigned short id;
76889e0f4d2SKip Macy 	struct mbuf *m_new;
76989e0f4d2SKip Macy 	int i, batch_target, notify;
77089e0f4d2SKip Macy 	RING_IDX req_prod;
77189e0f4d2SKip Macy 	struct xen_memory_reservation reservation;
77289e0f4d2SKip Macy 	grant_ref_t ref;
77389e0f4d2SKip Macy 	int nr_flips;
77489e0f4d2SKip Macy 	netif_rx_request_t *req;
77589e0f4d2SKip Macy 	vm_offset_t vaddr;
77689e0f4d2SKip Macy 	u_long pfn;
77789e0f4d2SKip Macy 
77889e0f4d2SKip Macy 	req_prod = sc->rx.req_prod_pvt;
77989e0f4d2SKip Macy 
78089e0f4d2SKip Macy 	if (unlikely(sc->carrier == 0))
78189e0f4d2SKip Macy 		return;
78289e0f4d2SKip Macy 
78389e0f4d2SKip Macy 	/*
784*931eeffaSKenneth D. Merry 	 * Allocate mbufs greedily, even though we batch updates to the
78589e0f4d2SKip Macy 	 * receive ring. This creates a less bursty demand on the memory
786*931eeffaSKenneth D. Merry 	 * allocator, and so should reduce the chance of failed allocation
78789e0f4d2SKip Macy 	 * requests both for ourself and for other kernel subsystems.
788*931eeffaSKenneth D. Merry 	 *
789*931eeffaSKenneth D. Merry 	 * Here we attempt to maintain rx_target buffers in flight, counting
790*931eeffaSKenneth D. Merry 	 * buffers that we have yet to process in the receive ring.
79189e0f4d2SKip Macy 	 */
79289e0f4d2SKip Macy 	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
79389e0f4d2SKip Macy 	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
79489e0f4d2SKip Macy 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
795*931eeffaSKenneth D. Merry 		if (m_new == NULL) {
796*931eeffaSKenneth D. Merry 			printf("%s: MGETHDR failed\n", __func__);
79789e0f4d2SKip Macy 			goto no_mbuf;
798*931eeffaSKenneth D. Merry 		}
79989e0f4d2SKip Macy 
80089e0f4d2SKip Macy 		m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
80189e0f4d2SKip Macy 		if ((m_new->m_flags & M_EXT) == 0) {
802*931eeffaSKenneth D. Merry 			printf("%s: m_cljget failed\n", __func__);
80389e0f4d2SKip Macy 			m_freem(m_new);
80489e0f4d2SKip Macy 
80589e0f4d2SKip Macy no_mbuf:
80689e0f4d2SKip Macy 			if (i != 0)
80789e0f4d2SKip Macy 				goto refill;
80889e0f4d2SKip Macy 			/*
80989e0f4d2SKip Macy 			 * XXX set timer
81089e0f4d2SKip Macy 			 */
81189e0f4d2SKip Macy 			break;
81289e0f4d2SKip Macy 		}
81389e0f4d2SKip Macy 		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;
81489e0f4d2SKip Macy 
81589e0f4d2SKip Macy 		/* queue the mbufs allocated */
81689e0f4d2SKip Macy 		mbufq_tail(&sc->xn_rx_batch, m_new);
81789e0f4d2SKip Macy 	}
81889e0f4d2SKip Macy 
819*931eeffaSKenneth D. Merry 	/*
820*931eeffaSKenneth D. Merry 	 * If we've allocated at least half of our target number of entries,
821*931eeffaSKenneth D. Merry 	 * submit them to the backend - we have enough to make the overhead
822*931eeffaSKenneth D. Merry 	 * of submission worthwhile.  Otherwise wait for more mbufs and
823*931eeffaSKenneth D. Merry 	 * request entries to become available.
824*931eeffaSKenneth D. Merry 	 */
82589e0f4d2SKip Macy 	if (i < (sc->rx_target/2)) {
82689e0f4d2SKip Macy 		if (req_prod > sc->rx.sring->req_prod)
82789e0f4d2SKip Macy 			goto push;
82889e0f4d2SKip Macy 		return;
82989e0f4d2SKip Macy 	}
830*931eeffaSKenneth D. Merry 
831*931eeffaSKenneth D. Merry 	/*
832*931eeffaSKenneth D. Merry 	 * Double the floating fill target if we risked having the backend
833*931eeffaSKenneth D. Merry 	 * run out of empty buffers for receive traffic.  We define "running
834*931eeffaSKenneth D. Merry 	 * low" as having less than a fourth of our target buffers free
835*931eeffaSKenneth D. Merry 	 * at the time we refilled the queue.
836*931eeffaSKenneth D. Merry 	 */
837*931eeffaSKenneth D. Merry 	if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
838*931eeffaSKenneth D. Merry 		sc->rx_target *= 2;
839*931eeffaSKenneth D. Merry 		if (sc->rx_target > sc->rx_max_target)
84089e0f4d2SKip Macy 			sc->rx_target = sc->rx_max_target;
841*931eeffaSKenneth D. Merry 	}
84289e0f4d2SKip Macy 
84389e0f4d2SKip Macy refill:
84489e0f4d2SKip Macy 	for (nr_flips = i = 0; ; i++) {
84589e0f4d2SKip Macy 		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
84689e0f4d2SKip Macy 			break;
84789e0f4d2SKip Macy 
84889e0f4d2SKip Macy 		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
84989e0f4d2SKip Macy 				vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);
85089e0f4d2SKip Macy 
85189e0f4d2SKip Macy 		id = xennet_rxidx(req_prod + i);
85289e0f4d2SKip Macy 
853*931eeffaSKenneth D. Merry 		KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
854*931eeffaSKenneth D. Merry 		sc->rx_mbufs[id] = m_new;
85589e0f4d2SKip Macy 
85689e0f4d2SKip Macy 		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
85789e0f4d2SKip Macy 		KASSERT((short)ref >= 0, ("negative ref"));
85889e0f4d2SKip Macy 		sc->grant_rx_ref[id] = ref;
85989e0f4d2SKip Macy 
86089e0f4d2SKip Macy 		vaddr = mtod(m_new, vm_offset_t);
86189e0f4d2SKip Macy 		pfn = vtophys(vaddr) >> PAGE_SHIFT;
86289e0f4d2SKip Macy 		req = RING_GET_REQUEST(&sc->rx, req_prod + i);
86389e0f4d2SKip Macy 
86489e0f4d2SKip Macy 		if (sc->copying_receiver == 0) {
86589e0f4d2SKip Macy 			gnttab_grant_foreign_transfer_ref(ref,
86623dc5621SKip Macy 			    otherend_id, pfn);
86789e0f4d2SKip Macy 			sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
86889e0f4d2SKip Macy 			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
86989e0f4d2SKip Macy 				/* Remove this page before passing
87089e0f4d2SKip Macy 				 * back to Xen.
87189e0f4d2SKip Macy 				 */
87289e0f4d2SKip Macy 				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
87389e0f4d2SKip Macy 				MULTI_update_va_mapping(&sc->rx_mcl[i],
87489e0f4d2SKip Macy 				    vaddr, 0, 0);
87589e0f4d2SKip Macy 			}
87689e0f4d2SKip Macy 			nr_flips++;
87789e0f4d2SKip Macy 		} else {
87889e0f4d2SKip Macy 			gnttab_grant_foreign_access_ref(ref,
87923dc5621SKip Macy 			    otherend_id,
88089e0f4d2SKip Macy 			    PFNTOMFN(pfn), 0);
88189e0f4d2SKip Macy 		}
88289e0f4d2SKip Macy 		req->id = id;
88389e0f4d2SKip Macy 		req->gref = ref;
88489e0f4d2SKip Macy 
88589e0f4d2SKip Macy 		sc->rx_pfn_array[i] =
88689e0f4d2SKip Macy 		    vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT;
88789e0f4d2SKip Macy 	}
88889e0f4d2SKip Macy 
88989e0f4d2SKip Macy 	KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
89089e0f4d2SKip Macy 	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
89189e0f4d2SKip Macy 	/*
89289e0f4d2SKip Macy 	 * We may have allocated buffers which have entries outstanding
89389e0f4d2SKip Macy 	 * in the page update queue -- make sure we flush those first!
89489e0f4d2SKip Macy 	 */
89589e0f4d2SKip Macy 	PT_UPDATES_FLUSH();
89689e0f4d2SKip Macy 	if (nr_flips != 0) {
89789e0f4d2SKip Macy #ifdef notyet
89889e0f4d2SKip Macy 		/* Tell the balloon driver what is going on. */
89989e0f4d2SKip Macy 		balloon_update_driver_allowance(i);
90089e0f4d2SKip Macy #endif
901920ba15bSKip Macy 		set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
90289e0f4d2SKip Macy 		reservation.nr_extents   = i;
90389e0f4d2SKip Macy 		reservation.extent_order = 0;
90489e0f4d2SKip Macy 		reservation.address_bits = 0;
90589e0f4d2SKip Macy 		reservation.domid        = DOMID_SELF;
90689e0f4d2SKip Macy 
90789e0f4d2SKip Macy 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
90889e0f4d2SKip Macy 
90989e0f4d2SKip Macy 			/* After all PTEs have been zapped, flush the TLB. */
91089e0f4d2SKip Macy 			sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
91189e0f4d2SKip Macy 			    UVMF_TLB_FLUSH|UVMF_ALL;
91289e0f4d2SKip Macy 
91389e0f4d2SKip Macy 			/* Give away a batch of pages. */
91489e0f4d2SKip Macy 			sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
91589e0f4d2SKip Macy 			sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
91689e0f4d2SKip Macy 			sc->rx_mcl[i].args[1] =  (u_long)&reservation;
91789e0f4d2SKip Macy 			/* Zap PTEs and give away pages in one big multicall. */
91889e0f4d2SKip Macy 			(void)HYPERVISOR_multicall(sc->rx_mcl, i+1);
91989e0f4d2SKip Macy 
92089e0f4d2SKip Macy 			/* Check return status of HYPERVISOR_dom_mem_op(). */
92189e0f4d2SKip Macy 			if (unlikely(sc->rx_mcl[i].result != i))
92289e0f4d2SKip Macy 				panic("Unable to reduce memory reservation\n");
92389e0f4d2SKip Macy 		} else {
92489e0f4d2SKip Macy 			if (HYPERVISOR_memory_op(
92589e0f4d2SKip Macy 			    XENMEM_decrease_reservation, &reservation)
92689e0f4d2SKip Macy 			    != i)
92789e0f4d2SKip Macy 				panic("Unable to reduce memory "
92889e0f4d2SKip Macy 				    "reservation\n");
92989e0f4d2SKip Macy 		}
93089e0f4d2SKip Macy 	} else {
93189e0f4d2SKip Macy 		wmb();
93289e0f4d2SKip Macy 	}
93389e0f4d2SKip Macy 
93489e0f4d2SKip Macy 	/* Above is a suitable barrier to ensure backend will see requests. */
93589e0f4d2SKip Macy 	sc->rx.req_prod_pvt = req_prod + i;
93689e0f4d2SKip Macy push:
93789e0f4d2SKip Macy 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
93889e0f4d2SKip Macy 	if (notify)
93989e0f4d2SKip Macy 		notify_remote_via_irq(sc->irq);
94089e0f4d2SKip Macy }
94189e0f4d2SKip Macy 
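/*
 * Process completed receive responses: pass good packets up the stack
 * (through LRO when enabled), free errored ones, and replenish the
 * receive ring.
 */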
94289e0f4d2SKip Macy static void
94389e0f4d2SKip Macy xn_rxeof(struct netfront_info *np)
94489e0f4d2SKip Macy {
94589e0f4d2SKip Macy 	struct ifnet *ifp;
94612678024SDoug Rabson #if __FreeBSD_version >= 700000
94712678024SDoug Rabson 	struct lro_ctrl *lro = &np->xn_lro;
94812678024SDoug Rabson 	struct lro_entry *queued;
94912678024SDoug Rabson #endif
95089e0f4d2SKip Macy 	struct netfront_rx_info rinfo;
95189e0f4d2SKip Macy 	struct netif_rx_response *rx = &rinfo.rx;
95289e0f4d2SKip Macy 	struct netif_extra_info *extras = rinfo.extras;
95389e0f4d2SKip Macy 	RING_IDX i, rp;
95489e0f4d2SKip Macy 	multicall_entry_t *mcl;
95589e0f4d2SKip Macy 	struct mbuf *m;
95683b92f6eSKip Macy 	struct mbuf_head rxq, errq;
95749906218SDoug Rabson 	int err, pages_flipped = 0, work_to_do;
95889e0f4d2SKip Macy 
95949906218SDoug Rabson 	do {
96089e0f4d2SKip Macy 		XN_RX_LOCK_ASSERT(np);
96189e0f4d2SKip Macy 		if (!netfront_carrier_ok(np))
96289e0f4d2SKip Macy 			return;
96389e0f4d2SKip Macy 
96489e0f4d2SKip Macy 		mbufq_init(&errq);
96589e0f4d2SKip Macy 		mbufq_init(&rxq);
96689e0f4d2SKip Macy 
96789e0f4d2SKip Macy 		ifp = np->xn_ifp;
96889e0f4d2SKip Macy 
96989e0f4d2SKip Macy 		rp = np->rx.sring->rsp_prod;
97089e0f4d2SKip Macy 		rmb();	/* Ensure we see queued responses up to 'rp'. */
97189e0f4d2SKip Macy 
97289e0f4d2SKip Macy 		i = np->rx.rsp_cons;
97389e0f4d2SKip Macy 		while ((i != rp)) {
97489e0f4d2SKip Macy 			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
97589e0f4d2SKip Macy 			memset(extras, 0, sizeof(rinfo.extras));
97689e0f4d2SKip Macy 
97783b92f6eSKip Macy 			m = NULL;
978*931eeffaSKenneth D. Merry 			err = xennet_get_responses(np, &rinfo, rp, &i, &m,
97989e0f4d2SKip Macy 			    &pages_flipped);
98089e0f4d2SKip Macy 
98189e0f4d2SKip Macy 			if (unlikely(err)) {
98283b92f6eSKip Macy 				if (m)
98389e0f4d2SKip Macy 					mbufq_tail(&errq, m);
98489e0f4d2SKip Macy 				np->stats.rx_errors++;
98589e0f4d2SKip Macy 				continue;
98689e0f4d2SKip Macy 			}
98789e0f4d2SKip Macy 
98889e0f4d2SKip Macy 			m->m_pkthdr.rcvif = ifp;
98989e0f4d2SKip Macy 			if ( rx->flags & NETRXF_data_validated ) {
99089e0f4d2SKip Macy 				/* Tell the stack the checksums are okay */
99189e0f4d2SKip Macy 				/*
99289e0f4d2SKip Macy 				 * XXX this isn't necessarily the case - need to add
99389e0f4d2SKip Macy 				 * check
99489e0f4d2SKip Macy 				 */
99589e0f4d2SKip Macy 
99689e0f4d2SKip Macy 				m->m_pkthdr.csum_flags |=
99789e0f4d2SKip Macy 					(CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
99889e0f4d2SKip Macy 					    | CSUM_PSEUDO_HDR);
99989e0f4d2SKip Macy 				m->m_pkthdr.csum_data = 0xffff;
100089e0f4d2SKip Macy 			}
100189e0f4d2SKip Macy 
100289e0f4d2SKip Macy 			np->stats.rx_packets++;
100383b92f6eSKip Macy 			np->stats.rx_bytes += m->m_pkthdr.len;
100489e0f4d2SKip Macy 
100589e0f4d2SKip Macy 			mbufq_tail(&rxq, m);
1006*931eeffaSKenneth D. Merry 			np->rx.rsp_cons = i;
100789e0f4d2SKip Macy 		}
100889e0f4d2SKip Macy 
100989e0f4d2SKip Macy 		if (pages_flipped) {
101089e0f4d2SKip Macy 			/* Some pages are no longer absent... */
101189e0f4d2SKip Macy #ifdef notyet
101289e0f4d2SKip Macy 			balloon_update_driver_allowance(-pages_flipped);
101389e0f4d2SKip Macy #endif
101489e0f4d2SKip Macy 			/* Do all the remapping work, and M->P updates, in one big
101589e0f4d2SKip Macy 			 * hypercall.
101689e0f4d2SKip Macy 			 */
101789e0f4d2SKip Macy 			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
101889e0f4d2SKip Macy 				mcl = np->rx_mcl + pages_flipped;
101989e0f4d2SKip Macy 				mcl->op = __HYPERVISOR_mmu_update;
102089e0f4d2SKip Macy 				mcl->args[0] = (u_long)np->rx_mmu;
102189e0f4d2SKip Macy 				mcl->args[1] = pages_flipped;
102289e0f4d2SKip Macy 				mcl->args[2] = 0;
102389e0f4d2SKip Macy 				mcl->args[3] = DOMID_SELF;
102489e0f4d2SKip Macy 				(void)HYPERVISOR_multicall(np->rx_mcl,
102589e0f4d2SKip Macy 				    pages_flipped + 1);
102689e0f4d2SKip Macy 			}
102789e0f4d2SKip Macy 		}
102889e0f4d2SKip Macy 
102989e0f4d2SKip Macy 		while ((m = mbufq_dequeue(&errq)))
103089e0f4d2SKip Macy 			m_freem(m);
103189e0f4d2SKip Macy 
103289e0f4d2SKip Macy 		/*
103389e0f4d2SKip Macy 		 * Process all the mbufs after the remapping is complete.
103489e0f4d2SKip Macy 		 * Break the mbuf chain first though.
103589e0f4d2SKip Macy 		 */
103689e0f4d2SKip Macy 		while ((m = mbufq_dequeue(&rxq)) != NULL) {
103789e0f4d2SKip Macy 			ifp->if_ipackets++;
103889e0f4d2SKip Macy 
103989e0f4d2SKip Macy 			/*
104089e0f4d2SKip Macy 			 * Do we really need to drop the rx lock?
104189e0f4d2SKip Macy 			 */
104289e0f4d2SKip Macy 			XN_RX_UNLOCK(np);
104312678024SDoug Rabson #if __FreeBSD_version >= 700000
104412678024SDoug Rabson 			/* Use LRO if possible */
104512678024SDoug Rabson 			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
104612678024SDoug Rabson 			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
104712678024SDoug Rabson 				/*
104812678024SDoug Rabson 				 * If LRO fails, pass up to the stack
104912678024SDoug Rabson 				 * directly.
105012678024SDoug Rabson 				 */
105189e0f4d2SKip Macy 				(*ifp->if_input)(ifp, m);
105212678024SDoug Rabson 			}
105312678024SDoug Rabson #else
105412678024SDoug Rabson 			(*ifp->if_input)(ifp, m);
105512678024SDoug Rabson #endif
105689e0f4d2SKip Macy 			XN_RX_LOCK(np);
105789e0f4d2SKip Macy 		}
105889e0f4d2SKip Macy 
105989e0f4d2SKip Macy 		np->rx.rsp_cons = i;
106089e0f4d2SKip Macy 
106112678024SDoug Rabson #if __FreeBSD_version >= 700000
106212678024SDoug Rabson 		/*
106312678024SDoug Rabson 		 * Flush any outstanding LRO work
106412678024SDoug Rabson 		 */
106512678024SDoug Rabson 		while (!SLIST_EMPTY(&lro->lro_active)) {
106612678024SDoug Rabson 			queued = SLIST_FIRST(&lro->lro_active);
106712678024SDoug Rabson 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
106812678024SDoug Rabson 			tcp_lro_flush(lro, queued);
106912678024SDoug Rabson 		}
107012678024SDoug Rabson #endif
107112678024SDoug Rabson 
107289e0f4d2SKip Macy #if 0
107389e0f4d2SKip Macy 		/* If we get a callback with very few responses, reduce fill target. */
107489e0f4d2SKip Macy 		/* NB. Note exponential increase, linear decrease. */
107589e0f4d2SKip Macy 		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
107689e0f4d2SKip Macy 			((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
107789e0f4d2SKip Macy 			np->rx_target = np->rx_min_target;
107889e0f4d2SKip Macy #endif
107989e0f4d2SKip Macy 
108089e0f4d2SKip Macy 		network_alloc_rx_buffers(np);
108189e0f4d2SKip Macy 
108249906218SDoug Rabson 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
108349906218SDoug Rabson 	} while (work_to_do);
108489e0f4d2SKip Macy }
108589e0f4d2SKip Macy 
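/*
 * Reap completed transmit responses: release the grant references and
 * mbufs for finished requests and clear IFF_DRV_OACTIVE so that
 * transmission can resume.
 */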
108689e0f4d2SKip Macy static void
108789e0f4d2SKip Macy xn_txeof(struct netfront_info *np)
108889e0f4d2SKip Macy {
108989e0f4d2SKip Macy 	RING_IDX i, prod;
109089e0f4d2SKip Macy 	unsigned short id;
109189e0f4d2SKip Macy 	struct ifnet *ifp;
109212678024SDoug Rabson 	netif_tx_response_t *txr;
109389e0f4d2SKip Macy 	struct mbuf *m;
109489e0f4d2SKip Macy 
109589e0f4d2SKip Macy 	XN_TX_LOCK_ASSERT(np);
109689e0f4d2SKip Macy 
109789e0f4d2SKip Macy 	if (!netfront_carrier_ok(np))
109889e0f4d2SKip Macy 		return;
109989e0f4d2SKip Macy 
110089e0f4d2SKip Macy 	ifp = np->xn_ifp;
110189e0f4d2SKip Macy 
110289e0f4d2SKip Macy 	do {
110389e0f4d2SKip Macy 		prod = np->tx.sring->rsp_prod;
110489e0f4d2SKip Macy 		rmb(); /* Ensure we see responses up to 'rp'. */
110589e0f4d2SKip Macy 
110689e0f4d2SKip Macy 		for (i = np->tx.rsp_cons; i != prod; i++) {
110712678024SDoug Rabson 			txr = RING_GET_RESPONSE(&np->tx, i);
110812678024SDoug Rabson 			if (txr->status == NETIF_RSP_NULL)
110912678024SDoug Rabson 				continue;
111012678024SDoug Rabson 
1111*931eeffaSKenneth D. Merry 			if (txr->status != NETIF_RSP_OKAY) {
1112*931eeffaSKenneth D. Merry 				printf("%s: WARNING: response is %d!\n",
1113*931eeffaSKenneth D. Merry 				       __func__, txr->status);
1114*931eeffaSKenneth D. Merry 			}
111512678024SDoug Rabson 			id = txr->id;
1116*931eeffaSKenneth D. Merry 			m = np->tx_mbufs[id];
11172d8fae98SAdrian Chadd 			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
1118*931eeffaSKenneth D. Merry 			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
1119*931eeffaSKenneth D. Merry 				("mbuf already on the free list, but we're "
1120*931eeffaSKenneth D. Merry 				"trying to free it again!"));
11212d8fae98SAdrian Chadd 			M_ASSERTVALID(m);
112289e0f4d2SKip Macy 
112312678024SDoug Rabson 			/*
112412678024SDoug Rabson 			 * Increment packet count if this is the last
112512678024SDoug Rabson 			 * mbuf of the chain.
112612678024SDoug Rabson 			 */
112712678024SDoug Rabson 			if (!m->m_next)
112889e0f4d2SKip Macy 				ifp->if_opackets++;
112989e0f4d2SKip Macy 			if (unlikely(gnttab_query_foreign_access(
113089e0f4d2SKip Macy 			    np->grant_tx_ref[id]) != 0)) {
1131*931eeffaSKenneth D. Merry 				panic("grant id %u still in use by the backend",
1132*931eeffaSKenneth D. Merry 				      id);
113389e0f4d2SKip Macy 			}
113489e0f4d2SKip Macy 			gnttab_end_foreign_access_ref(
1135920ba15bSKip Macy 				np->grant_tx_ref[id]);
113689e0f4d2SKip Macy 			gnttab_release_grant_reference(
113789e0f4d2SKip Macy 				&np->gref_tx_head, np->grant_tx_ref[id]);
113889e0f4d2SKip Macy 			np->grant_tx_ref[id] = GRANT_INVALID_REF;
113989e0f4d2SKip Macy 
1140*931eeffaSKenneth D. Merry 			np->tx_mbufs[id] = NULL;
1141*931eeffaSKenneth D. Merry 			add_id_to_freelist(np->tx_mbufs, id);
1142a4ec37f5SAdrian Chadd 			np->xn_cdata.xn_tx_chain_cnt--;
114312678024SDoug Rabson 			m_free(m);
1144d76e4550SAdrian Chadd 			/* Only mark the queue active if we've freed up at least one slot to try */
1145d76e4550SAdrian Chadd 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
114689e0f4d2SKip Macy 		}
114789e0f4d2SKip Macy 		np->tx.rsp_cons = prod;
114889e0f4d2SKip Macy 
114989e0f4d2SKip Macy 		/*
115089e0f4d2SKip Macy 		 * Set a new event, then check for race with update of
115189e0f4d2SKip Macy 		 * tx_cons. Note that it is essential to schedule a
115289e0f4d2SKip Macy 		 * callback, no matter how few buffers are pending. Even if
115389e0f4d2SKip Macy 		 * there is space in the transmit ring, higher layers may
115489e0f4d2SKip Macy 		 * be blocked because too much data is outstanding: in such
115589e0f4d2SKip Macy 		 * cases notification from Xen is likely to be the only kick
115689e0f4d2SKip Macy 		 * that we'll get.
115789e0f4d2SKip Macy 		 */
115889e0f4d2SKip Macy 		np->tx.sring->rsp_event =
115989e0f4d2SKip Macy 		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
116089e0f4d2SKip Macy 
116189e0f4d2SKip Macy 		mb();
116289e0f4d2SKip Macy 	} while (prod != np->tx.sring->rsp_prod);
116389e0f4d2SKip Macy 
116489e0f4d2SKip Macy 	if (np->tx_full &&
116589e0f4d2SKip Macy 	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
116689e0f4d2SKip Macy 		np->tx_full = 0;
116789e0f4d2SKip Macy #if 0
116889e0f4d2SKip Macy 		if (np->user_state == UST_OPEN)
116989e0f4d2SKip Macy 			netif_wake_queue(dev);
117089e0f4d2SKip Macy #endif
117189e0f4d2SKip Macy 	}
117289e0f4d2SKip Macy 
117389e0f4d2SKip Macy }
117489e0f4d2SKip Macy 
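/*
 * Interrupt handler: drain the transmit and receive rings and restart
 * transmission if packets are waiting in the send queue.
 */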
117589e0f4d2SKip Macy static void
117689e0f4d2SKip Macy xn_intr(void *xsc)
117789e0f4d2SKip Macy {
117889e0f4d2SKip Macy 	struct netfront_info *np = xsc;
117989e0f4d2SKip Macy 	struct ifnet *ifp = np->xn_ifp;
118089e0f4d2SKip Macy 
118189e0f4d2SKip Macy #if 0
118289e0f4d2SKip Macy 	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
118389e0f4d2SKip Macy 	    likely(netfront_carrier_ok(np)) &&
118489e0f4d2SKip Macy 	    ifp->if_drv_flags & IFF_DRV_RUNNING))
118589e0f4d2SKip Macy 		return;
118689e0f4d2SKip Macy #endif
1187*931eeffaSKenneth D. Merry 	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
118889e0f4d2SKip Macy 		XN_TX_LOCK(np);
118989e0f4d2SKip Macy 		xn_txeof(np);
119089e0f4d2SKip Macy 		XN_TX_UNLOCK(np);
119189e0f4d2SKip Macy 	}
119289e0f4d2SKip Macy 
119389e0f4d2SKip Macy 	XN_RX_LOCK(np);
119489e0f4d2SKip Macy 	xn_rxeof(np);
119589e0f4d2SKip Macy 	XN_RX_UNLOCK(np);
119689e0f4d2SKip Macy 
119789e0f4d2SKip Macy 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
119889e0f4d2SKip Macy 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
119989e0f4d2SKip Macy 		xn_start(ifp);
120089e0f4d2SKip Macy }
120189e0f4d2SKip Macy 
120289e0f4d2SKip Macy 
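/*
 * Requeue an unconsumed mbuf and its grant reference at the current
 * request-producer slot of the RX ring so the buffer is offered to the
 * backend again rather than leaked.
 */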
120389e0f4d2SKip Macy static void
120489e0f4d2SKip Macy xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
120589e0f4d2SKip Macy 	grant_ref_t ref)
120689e0f4d2SKip Macy {
120789e0f4d2SKip Macy 	int new = xennet_rxidx(np->rx.req_prod_pvt);
120889e0f4d2SKip Macy 
120989e0f4d2SKip Macy 	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
121089e0f4d2SKip Macy 	np->rx_mbufs[new] = m;
121189e0f4d2SKip Macy 	np->grant_rx_ref[new] = ref;
121289e0f4d2SKip Macy 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
121389e0f4d2SKip Macy 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
121489e0f4d2SKip Macy 	np->rx.req_prod_pvt++;
121589e0f4d2SKip Macy }
121689e0f4d2SKip Macy 
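/*
 * Consume the chain of netif_extra_info entries that follows an RX
 * response flagged with NETRXF_extra_info.  Valid entries are copied
 * into the caller's extras[] array, and the ring slots they occupied
 * are recycled back onto the RX ring.
 */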
121789e0f4d2SKip Macy static int
121889e0f4d2SKip Macy xennet_get_extras(struct netfront_info *np,
1219*931eeffaSKenneth D. Merry     struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
122089e0f4d2SKip Macy {
122189e0f4d2SKip Macy 	struct netif_extra_info *extra;
122289e0f4d2SKip Macy 
122389e0f4d2SKip Macy 	int err = 0;
122489e0f4d2SKip Macy 
122589e0f4d2SKip Macy 	do {
122689e0f4d2SKip Macy 		struct mbuf *m;
122789e0f4d2SKip Macy 		grant_ref_t ref;
122889e0f4d2SKip Macy 
1229*931eeffaSKenneth D. Merry 		if (unlikely(*cons + 1 == rp)) {
123089e0f4d2SKip Macy #if 0
123189e0f4d2SKip Macy 			if (net_ratelimit())
123289e0f4d2SKip Macy 				WPRINTK("Missing extra info\n");
123389e0f4d2SKip Macy #endif
1234*931eeffaSKenneth D. Merry 			err = EINVAL;
123589e0f4d2SKip Macy 			break;
123689e0f4d2SKip Macy 		}
123789e0f4d2SKip Macy 
123889e0f4d2SKip Macy 		extra = (struct netif_extra_info *)
1239*931eeffaSKenneth D. Merry 		RING_GET_RESPONSE(&np->rx, ++(*cons));
124089e0f4d2SKip Macy 
124189e0f4d2SKip Macy 		if (unlikely(!extra->type ||
124289e0f4d2SKip Macy 			extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
124389e0f4d2SKip Macy #if 0
124489e0f4d2SKip Macy 			if (net_ratelimit())
124589e0f4d2SKip Macy 				WPRINTK("Invalid extra type: %d\n",
124689e0f4d2SKip Macy 					extra->type);
124789e0f4d2SKip Macy #endif
1248*931eeffaSKenneth D. Merry 			err = EINVAL;
124989e0f4d2SKip Macy 		} else {
125089e0f4d2SKip Macy 			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
125189e0f4d2SKip Macy 		}
125289e0f4d2SKip Macy 
1253*931eeffaSKenneth D. Merry 		m = xennet_get_rx_mbuf(np, *cons);
1254*931eeffaSKenneth D. Merry 		ref = xennet_get_rx_ref(np, *cons);
125589e0f4d2SKip Macy 		xennet_move_rx_slot(np, m, ref);
125689e0f4d2SKip Macy 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
125789e0f4d2SKip Macy 
125889e0f4d2SKip Macy 	return err;
125989e0f4d2SKip Macy }
126089e0f4d2SKip Macy 
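/*
 * Gather all of the responses that make up a single received packet
 * (including any extra-info entries and additional fragments) into one
 * mbuf chain, returned via *list.  *cons is advanced past every
 * response that was consumed; errors are reported to the caller so the
 * packet can be dropped.
 */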
126189e0f4d2SKip Macy static int
126289e0f4d2SKip Macy xennet_get_responses(struct netfront_info *np,
1263*931eeffaSKenneth D. Merry 	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
126483b92f6eSKip Macy 	struct mbuf  **list,
126589e0f4d2SKip Macy 	int *pages_flipped_p)
126689e0f4d2SKip Macy {
126789e0f4d2SKip Macy 	int pages_flipped = *pages_flipped_p;
126889e0f4d2SKip Macy 	struct mmu_update *mmu;
126989e0f4d2SKip Macy 	struct multicall_entry *mcl;
127089e0f4d2SKip Macy 	struct netif_rx_response *rx = &rinfo->rx;
127189e0f4d2SKip Macy 	struct netif_extra_info *extras = rinfo->extras;
127283b92f6eSKip Macy 	struct mbuf *m, *m0, *m_prev;
1273*931eeffaSKenneth D. Merry 	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
1274*931eeffaSKenneth D. Merry 	RING_IDX ref_cons = *cons;
1275*931eeffaSKenneth D. Merry 	int max = 5 /* MAX_TX_REQ_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
127689e0f4d2SKip Macy 	int frags = 1;
127789e0f4d2SKip Macy 	int err = 0;
127889e0f4d2SKip Macy 	u_long ret;
127989e0f4d2SKip Macy 
1280*931eeffaSKenneth D. Merry 	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);
128183b92f6eSKip Macy 
128283b92f6eSKip Macy 
128389e0f4d2SKip Macy 	if (rx->flags & NETRXF_extra_info) {
1284*931eeffaSKenneth D. Merry 		err = xennet_get_extras(np, extras, rp, cons);
128589e0f4d2SKip Macy 	}
128689e0f4d2SKip Macy 
128783b92f6eSKip Macy 
128883b92f6eSKip Macy 	if (m0 != NULL) {
128983b92f6eSKip Macy 		m0->m_pkthdr.len = 0;
129083b92f6eSKip Macy 		m0->m_next = NULL;
129183b92f6eSKip Macy 	}
129283b92f6eSKip Macy 
129389e0f4d2SKip Macy 	for (;;) {
129489e0f4d2SKip Macy 		u_long mfn;
129589e0f4d2SKip Macy 
129683b92f6eSKip Macy #if 0
1297227ca257SKip Macy 		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
129883b92f6eSKip Macy 			rx->status, rx->offset, frags);
129983b92f6eSKip Macy #endif
130089e0f4d2SKip Macy 		if (unlikely(rx->status < 0 ||
130189e0f4d2SKip Macy 			rx->offset + rx->status > PAGE_SIZE)) {
1302*931eeffaSKenneth D. Merry 
130389e0f4d2SKip Macy #if 0
130489e0f4d2SKip Macy 			if (net_ratelimit())
130589e0f4d2SKip Macy 				WPRINTK("rx->offset: %x, size: %u\n",
130689e0f4d2SKip Macy 					rx->offset, rx->status);
130789e0f4d2SKip Macy #endif
130889e0f4d2SKip Macy 			xennet_move_rx_slot(np, m, ref);
1309*931eeffaSKenneth D. Merry 			if (m0 == m)
1310*931eeffaSKenneth D. Merry 				m0 = NULL;
1311*931eeffaSKenneth D. Merry 			m = NULL;
1312*931eeffaSKenneth D. Merry 			err = EINVAL;
1313*931eeffaSKenneth D. Merry 			goto next_skip_queue;
131489e0f4d2SKip Macy 		}
131589e0f4d2SKip Macy 
131689e0f4d2SKip Macy 		/*
131789e0f4d2SKip Macy 		 * This definitely indicates a bug, either in this driver or in
131889e0f4d2SKip Macy 		 * the backend driver.  In the future this should flag the bad
131989e0f4d2SKip Macy 		 * situation to the system controller so it can reboot the backend.
132089e0f4d2SKip Macy 		 */
132189e0f4d2SKip Macy 		if (ref == GRANT_INVALID_REF) {
1322*931eeffaSKenneth D. Merry 
132389e0f4d2SKip Macy #if 0
132489e0f4d2SKip Macy 			if (net_ratelimit())
132589e0f4d2SKip Macy 				WPRINTK("Bad rx response id %d.\n", rx->id);
132689e0f4d2SKip Macy #endif
1327*931eeffaSKenneth D. Merry 			err = EINVAL;
132889e0f4d2SKip Macy 			goto next;
132989e0f4d2SKip Macy 		}
133089e0f4d2SKip Macy 
133189e0f4d2SKip Macy 		if (!np->copying_receiver) {
133289e0f4d2SKip Macy 			/* Memory pressure, insufficient buffer
133389e0f4d2SKip Macy 			 * headroom, ...
133489e0f4d2SKip Macy 			 */
133589e0f4d2SKip Macy 			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
1336*931eeffaSKenneth D. Merry 				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
133789e0f4d2SKip Macy 					rx->id, rx->status);
133889e0f4d2SKip Macy 				xennet_move_rx_slot(np, m, ref);
1339*931eeffaSKenneth D. Merry 				err = ENOMEM;
134089e0f4d2SKip Macy 				goto next;
134189e0f4d2SKip Macy 			}
134289e0f4d2SKip Macy 
134389e0f4d2SKip Macy 			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
134489e0f4d2SKip Macy 				/* Remap the page. */
134589e0f4d2SKip Macy 				void *vaddr = mtod(m, void *);
134689e0f4d2SKip Macy 				uint32_t pfn;
134789e0f4d2SKip Macy 
134889e0f4d2SKip Macy 				mcl = np->rx_mcl + pages_flipped;
134989e0f4d2SKip Macy 				mmu = np->rx_mmu + pages_flipped;
135089e0f4d2SKip Macy 
135189e0f4d2SKip Macy 				MULTI_update_va_mapping(mcl, (u_long)vaddr,
13526ae0e31bSKip Macy 				    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
135389e0f4d2SKip Macy 				    PG_V | PG_M | PG_A, 0);
13543a6d1fcfSKip Macy 				pfn = (uintptr_t)m->m_ext.ext_arg1;
135589e0f4d2SKip Macy 				mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
135689e0f4d2SKip Macy 				    MMU_MACHPHYS_UPDATE;
135789e0f4d2SKip Macy 				mmu->val = pfn;
135889e0f4d2SKip Macy 
135989e0f4d2SKip Macy 				set_phys_to_machine(pfn, mfn);
136089e0f4d2SKip Macy 			}
136189e0f4d2SKip Macy 			pages_flipped++;
136289e0f4d2SKip Macy 		} else {
1363920ba15bSKip Macy 			ret = gnttab_end_foreign_access_ref(ref);
136489e0f4d2SKip Macy 			KASSERT(ret, ("ret != 0"));
136589e0f4d2SKip Macy 		}
136689e0f4d2SKip Macy 
136789e0f4d2SKip Macy 		gnttab_release_grant_reference(&np->gref_rx_head, ref);
136889e0f4d2SKip Macy 
136989e0f4d2SKip Macy next:
13703a539122SAdrian Chadd 		if (m == NULL)
13713a539122SAdrian Chadd 			break;
13723a539122SAdrian Chadd 
137383b92f6eSKip Macy 		m->m_len = rx->status;
137483b92f6eSKip Macy 		m->m_data += rx->offset;
137583b92f6eSKip Macy 		m0->m_pkthdr.len += rx->status;
137683b92f6eSKip Macy 
1377*931eeffaSKenneth D. Merry next_skip_queue:
137889e0f4d2SKip Macy 		if (!(rx->flags & NETRXF_more_data))
137989e0f4d2SKip Macy 			break;
138089e0f4d2SKip Macy 
1381*931eeffaSKenneth D. Merry 		if (*cons + frags == rp) {
138289e0f4d2SKip Macy 			if (net_ratelimit())
138389e0f4d2SKip Macy 				WPRINTK("Need more frags\n");
1384*931eeffaSKenneth D. Merry 			err = ENOENT;
1385*931eeffaSKenneth D. Merry 			printf("%s: cons %u frags %u rp %u, not enough frags\n",
1386*931eeffaSKenneth D. Merry 			       __func__, *cons, frags, rp);
138789e0f4d2SKip Macy 			break;
138889e0f4d2SKip Macy 		}
1389*931eeffaSKenneth D. Merry 		/*
1390*931eeffaSKenneth D. Merry 		 * Note that m can be NULL, if rx->status < 0 or if
1391*931eeffaSKenneth D. Merry 		 * rx->offset + rx->status > PAGE_SIZE above.
1392*931eeffaSKenneth D. Merry 		 */
139383b92f6eSKip Macy 		m_prev = m;
139489e0f4d2SKip Macy 
1395*931eeffaSKenneth D. Merry 		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
1396*931eeffaSKenneth D. Merry 		m = xennet_get_rx_mbuf(np, *cons + frags);
139783b92f6eSKip Macy 
1398*931eeffaSKenneth D. Merry 		/*
1399*931eeffaSKenneth D. Merry 		 * m_prev == NULL can happen if rx->status < 0 or if
1400*931eeffaSKenneth D. Merry 		 * rx->offset + rx->status > PAGE_SIZE above.
1401*931eeffaSKenneth D. Merry 		 */
1402*931eeffaSKenneth D. Merry 		if (m_prev != NULL)
140383b92f6eSKip Macy 			m_prev->m_next = m;
1404*931eeffaSKenneth D. Merry 
1405*931eeffaSKenneth D. Merry 		/*
1406*931eeffaSKenneth D. Merry 		 * m0 can be NULL if rx->status < 0 or if rx->offset +
1407*931eeffaSKenneth D. Merry 		 * rx->status > PAGE_SIZE above.
1408*931eeffaSKenneth D. Merry 		 */
1409*931eeffaSKenneth D. Merry 		if (m0 == NULL)
1410*931eeffaSKenneth D. Merry 			m0 = m;
141183b92f6eSKip Macy 		m->m_next = NULL;
1412*931eeffaSKenneth D. Merry 		ref = xennet_get_rx_ref(np, *cons + frags);
1413*931eeffaSKenneth D. Merry 		ref_cons = *cons + frags;
141489e0f4d2SKip Macy 		frags++;
141589e0f4d2SKip Macy 	}
141683b92f6eSKip Macy 	*list = m0;
141789e0f4d2SKip Macy 
141889e0f4d2SKip Macy 	if (unlikely(frags > max)) {
141989e0f4d2SKip Macy 		if (net_ratelimit())
142089e0f4d2SKip Macy 			WPRINTK("Too many frags\n");
1421*931eeffaSKenneth D. Merry 		printf("%s: too many frags %d > max %d\n", __func__, frags,
1422*931eeffaSKenneth D. Merry 		       max);
1423*931eeffaSKenneth D. Merry 		err = E2BIG;
142489e0f4d2SKip Macy 	}
142589e0f4d2SKip Macy 
1426*931eeffaSKenneth D. Merry 	*cons += frags;
142789e0f4d2SKip Macy 
142889e0f4d2SKip Macy 	*pages_flipped_p = pages_flipped;
142989e0f4d2SKip Macy 
143089e0f4d2SKip Macy 	return err;
143189e0f4d2SKip Macy }
143289e0f4d2SKip Macy 
143389e0f4d2SKip Macy static void
143489e0f4d2SKip Macy xn_tick_locked(struct netfront_info *sc)
143589e0f4d2SKip Macy {
143689e0f4d2SKip Macy 	XN_RX_LOCK_ASSERT(sc);
143789e0f4d2SKip Macy 	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
143889e0f4d2SKip Macy 
143989e0f4d2SKip Macy 	/* XXX placeholder for printing debug information */
144089e0f4d2SKip Macy 
144189e0f4d2SKip Macy }
144289e0f4d2SKip Macy 
144389e0f4d2SKip Macy 
144489e0f4d2SKip Macy static void
144589e0f4d2SKip Macy xn_tick(void *xsc)
144689e0f4d2SKip Macy {
144789e0f4d2SKip Macy 	struct netfront_info *sc;
144889e0f4d2SKip Macy 
144989e0f4d2SKip Macy 	sc = xsc;
145089e0f4d2SKip Macy 	XN_RX_LOCK(sc);
145189e0f4d2SKip Macy 	xn_tick_locked(sc);
145289e0f4d2SKip Macy 	XN_RX_UNLOCK(sc);
145389e0f4d2SKip Macy 
145489e0f4d2SKip Macy }
145589e0f4d2SKip Macy 
1456*931eeffaSKenneth D. Merry /**
1457*931eeffaSKenneth D. Merry  * \brief Count the number of fragments in an mbuf chain.
1458*931eeffaSKenneth D. Merry  *
1459*931eeffaSKenneth D. Merry  * Surprisingly, there isn't an M* macro for this.
1460c099cafaSAdrian Chadd  */
1461*931eeffaSKenneth D. Merry static inline int
1462*931eeffaSKenneth D. Merry xn_count_frags(struct mbuf *m)
1463*931eeffaSKenneth D. Merry {
1464*931eeffaSKenneth D. Merry 	int nfrags;
1465*931eeffaSKenneth D. Merry 
1466*931eeffaSKenneth D. Merry 	for (nfrags = 0; m != NULL; m = m->m_next)
1467*931eeffaSKenneth D. Merry 		nfrags++;
1468*931eeffaSKenneth D. Merry 
1469*931eeffaSKenneth D. Merry 	return (nfrags);
147089e0f4d2SKip Macy }
147189e0f4d2SKip Macy 
1472*931eeffaSKenneth D. Merry /**
1473*931eeffaSKenneth D. Merry  * Given an mbuf chain, make sure we have enough room and then push
1474*931eeffaSKenneth D. Merry  * it onto the transmit ring.
1475*931eeffaSKenneth D. Merry  */
1476*931eeffaSKenneth D. Merry static int
1477*931eeffaSKenneth D. Merry xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
1478*931eeffaSKenneth D. Merry {
1479*931eeffaSKenneth D. Merry 	struct ifnet *ifp;
1480*931eeffaSKenneth D. Merry 	struct mbuf *m;
1481*931eeffaSKenneth D. Merry 	u_int nfrags;
1482*931eeffaSKenneth D. Merry 	netif_extra_info_t *extra;
1483*931eeffaSKenneth D. Merry 	int otherend_id;
1484*931eeffaSKenneth D. Merry 
1485*931eeffaSKenneth D. Merry 	ifp = sc->xn_ifp;
1486*931eeffaSKenneth D. Merry 
1487*931eeffaSKenneth D. Merry 	/* Count the fragments in the incoming mbuf chain. */
1490*931eeffaSKenneth D. Merry 	nfrags = xn_count_frags(m_head);
1491*931eeffaSKenneth D. Merry 
1492*931eeffaSKenneth D. Merry 	/*
1493*931eeffaSKenneth D. Merry 	 * Check to see whether this request is longer than netback
1494*931eeffaSKenneth D. Merry 	 * can handle, and try to defragment it.  It is a bit lame, but
1495*931eeffaSKenneth D. Merry 	 * the netback driver in Linux can't deal with nfrags >
1496*931eeffaSKenneth D. Merry 	 * MAX_TX_REQ_FRAGS, which is a quirk of the Linux network stack.
1497*931eeffaSKenneth D. Merry 	 */
1501*931eeffaSKenneth D. Merry 	if (nfrags > MAX_TX_REQ_FRAGS) {
150212678024SDoug Rabson 		m = m_defrag(m_head, M_DONTWAIT);
150312678024SDoug Rabson 		if (!m) {
1504*931eeffaSKenneth D. Merry 			/*
1505*931eeffaSKenneth D. Merry 			 * Defrag failed, so free the mbuf and
1506*931eeffaSKenneth D. Merry 			 * therefore drop the packet.
1507*931eeffaSKenneth D. Merry 			 */
150812678024SDoug Rabson 			m_freem(m_head);
1509*931eeffaSKenneth D. Merry 			return (EMSGSIZE);
151012678024SDoug Rabson 		}
151112678024SDoug Rabson 		m_head = m;
151212678024SDoug Rabson 	}
151389e0f4d2SKip Macy 
1514a4ec37f5SAdrian Chadd 	/* Determine how many fragments now exist */
1515*931eeffaSKenneth D. Merry 	nfrags = xn_count_frags(m_head);
1516a4ec37f5SAdrian Chadd 
1517a4ec37f5SAdrian Chadd 	/*
1518*931eeffaSKenneth D. Merry 	 * Check to see whether the defragmented packet still has too many
1519*931eeffaSKenneth D. Merry 	 * segments for the Linux netback driver.  The FreeBSD TCP stack,
1520a4ec37f5SAdrian Chadd 	 * with TSO enabled, can produce a chain of mbufs longer than Linux
1521*931eeffaSKenneth D. Merry 	 * can handle.  Make sure we don't pass a too-long chain over to
1522*931eeffaSKenneth D. Merry 	 * the other side by dropping the packet.  It doesn't look like
1523*931eeffaSKenneth D. Merry 	 * there is currently a way to tell the TCP stack to generate a
1524*931eeffaSKenneth D. Merry 	 * shorter chain of packets.
15273fb28bbbSAdrian Chadd 	 */
1528*931eeffaSKenneth D. Merry 	if (nfrags > MAX_TX_REQ_FRAGS) {
1529*931eeffaSKenneth D. Merry 		m_freem(m_head);
1530*931eeffaSKenneth D. Merry 		return (EMSGSIZE);
1531a4ec37f5SAdrian Chadd 	}
1532a4ec37f5SAdrian Chadd 
15333fb28bbbSAdrian Chadd 	/*
1534*931eeffaSKenneth D. Merry 	 * This check should be redundant.  We've already verified that we
1535*931eeffaSKenneth D. Merry 	 * have enough slots in the ring to handle a packet of maximum
1536*931eeffaSKenneth D. Merry 	 * size, and that our packet is less than the maximum size.  Keep
1537*931eeffaSKenneth D. Merry 	 * it in here as an assert for now just to make certain that
1538*931eeffaSKenneth D. Merry 	 * xn_tx_chain_cnt is accurate.
15393fb28bbbSAdrian Chadd 	 */
1540*931eeffaSKenneth D. Merry 	KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
1541*931eeffaSKenneth D. Merry 		("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
1542*931eeffaSKenneth D. Merry 		 "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
1543*931eeffaSKenneth D. Merry 		 (int) nfrags, (int) NET_TX_RING_SIZE));
1544a4ec37f5SAdrian Chadd 
154589e0f4d2SKip Macy 	/*
154689e0f4d2SKip Macy 	 * Start packing the mbufs in this chain into
154789e0f4d2SKip Macy 	 * the fragment pointers. Stop when we run out
154889e0f4d2SKip Macy 	 * of fragments or hit the end of the mbuf chain.
154989e0f4d2SKip Macy 	 */
155012678024SDoug Rabson 	m = m_head;
155112678024SDoug Rabson 	extra = NULL;
1552*931eeffaSKenneth D. Merry 	otherend_id = xenbus_get_otherend_id(sc->xbdev);
155312678024SDoug Rabson 	for (m = m_head; m; m = m->m_next) {
1554*931eeffaSKenneth D. Merry 		netif_tx_request_t *tx;
1555*931eeffaSKenneth D. Merry 		uintptr_t id;
1556*931eeffaSKenneth D. Merry 		grant_ref_t ref;
1557*931eeffaSKenneth D. Merry 		u_long mfn; /* XXX Wrong type? */
1558*931eeffaSKenneth D. Merry 
1559*931eeffaSKenneth D. Merry 		tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
1560*931eeffaSKenneth D. Merry 		id = get_id_from_freelist(sc->tx_mbufs);
1561a4ec37f5SAdrian Chadd 		if (id == 0)
1562a4ec37f5SAdrian Chadd 			panic("%s: was allocated the freelist head!\n", __func__);
1563a4ec37f5SAdrian Chadd 		sc->xn_cdata.xn_tx_chain_cnt++;
1564*931eeffaSKenneth D. Merry 		if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
1565*931eeffaSKenneth D. Merry 			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n", __func__);
1566*931eeffaSKenneth D. Merry 		sc->tx_mbufs[id] = m;
156789e0f4d2SKip Macy 		tx->id = id;
156889e0f4d2SKip Macy 		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
156989e0f4d2SKip Macy 		KASSERT((short)ref >= 0, ("Negative ref"));
157012678024SDoug Rabson 		mfn = virt_to_mfn(mtod(m, vm_offset_t));
157123dc5621SKip Macy 		gnttab_grant_foreign_access_ref(ref, otherend_id,
157289e0f4d2SKip Macy 		    mfn, GNTMAP_readonly);
157389e0f4d2SKip Macy 		tx->gref = sc->grant_tx_ref[id] = ref;
157412678024SDoug Rabson 		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
157589e0f4d2SKip Macy 		tx->flags = 0;
157612678024SDoug Rabson 		if (m == m_head) {
157712678024SDoug Rabson 			/*
157812678024SDoug Rabson 			 * The first fragment has the entire packet
157912678024SDoug Rabson 			 * size, subsequent fragments have just the
158012678024SDoug Rabson 			 * fragment size. The backend works out the
158112678024SDoug Rabson 			 * true size of the first fragment by
158212678024SDoug Rabson 			 * subtracting the sizes of the other
158312678024SDoug Rabson 			 * fragments.
158412678024SDoug Rabson 			 */
158512678024SDoug Rabson 			tx->size = m->m_pkthdr.len;
158689e0f4d2SKip Macy 
158712678024SDoug Rabson 			/*
1588*931eeffaSKenneth D. Merry 			 * The first fragment contains the checksum flags
1589*931eeffaSKenneth D. Merry 			 * and is optionally followed by extra data for
1590*931eeffaSKenneth D. Merry 			 * TSO etc.  CSUM_TSO requires checksum offloading,
1591*931eeffaSKenneth D. Merry 			 * but some versions of FreeBSD fail to set CSUM_TCP
1592*931eeffaSKenneth D. Merry 			 * in the CSUM_TSO case, so we have to test for
1593*931eeffaSKenneth D. Merry 			 * CSUM_TSO explicitly.
159812678024SDoug Rabson 			 */
159912678024SDoug Rabson 			if (m->m_pkthdr.csum_flags
1600*931eeffaSKenneth D. Merry 			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
160112678024SDoug Rabson 				tx->flags |= (NETTXF_csum_blank
160212678024SDoug Rabson 				    | NETTXF_data_validated);
160312678024SDoug Rabson 			}
160412678024SDoug Rabson #if __FreeBSD_version >= 700000
160512678024SDoug Rabson 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
160612678024SDoug Rabson 				struct netif_extra_info *gso =
160712678024SDoug Rabson 					(struct netif_extra_info *)
1608*931eeffaSKenneth D. Merry 					RING_GET_REQUEST(&sc->tx,
1609*931eeffaSKenneth D. Merry 							 ++sc->tx.req_prod_pvt);
161089e0f4d2SKip Macy 
161112678024SDoug Rabson 				tx->flags |= NETTXF_extra_info;
161289e0f4d2SKip Macy 
161312678024SDoug Rabson 				gso->u.gso.size = m->m_pkthdr.tso_segsz;
161412678024SDoug Rabson 				gso->u.gso.type =
161512678024SDoug Rabson 					XEN_NETIF_GSO_TYPE_TCPV4;
161612678024SDoug Rabson 				gso->u.gso.pad = 0;
161712678024SDoug Rabson 				gso->u.gso.features = 0;
161812678024SDoug Rabson 
161912678024SDoug Rabson 				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
162012678024SDoug Rabson 				gso->flags = 0;
162112678024SDoug Rabson 			}
162212678024SDoug Rabson #endif
162312678024SDoug Rabson 		} else {
162412678024SDoug Rabson 			tx->size = m->m_len;
162512678024SDoug Rabson 		}
1626*931eeffaSKenneth D. Merry 		if (m->m_next)
162712678024SDoug Rabson 			tx->flags |= NETTXF_more_data;
162812678024SDoug Rabson 
1629*931eeffaSKenneth D. Merry 		sc->tx.req_prod_pvt++;
1630*931eeffaSKenneth D. Merry 	}
163112678024SDoug Rabson 	BPF_MTAP(ifp, m_head);
163212678024SDoug Rabson 
163312678024SDoug Rabson 	sc->stats.tx_bytes += m_head->m_pkthdr.len;
163489e0f4d2SKip Macy 	sc->stats.tx_packets++;
1635*931eeffaSKenneth D. Merry 
1636*931eeffaSKenneth D. Merry 	return (0);
163789e0f4d2SKip Macy }
163889e0f4d2SKip Macy 
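/*
 * Transmit as many queued packets as the ring allows: while enough free
 * slots remain for a maximum-sized packet, dequeue mbufs from the send
 * queue and hand them to xn_assemble_tx_request(), then push the new
 * requests and notify the backend if it asked to be woken.
 */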
1639*931eeffaSKenneth D. Merry static void
1640*931eeffaSKenneth D. Merry xn_start_locked(struct ifnet *ifp)
1641*931eeffaSKenneth D. Merry {
1642*931eeffaSKenneth D. Merry 	struct netfront_info *sc;
1643*931eeffaSKenneth D. Merry 	struct mbuf *m_head;
1644*931eeffaSKenneth D. Merry 	int notify;
1645*931eeffaSKenneth D. Merry 
1646*931eeffaSKenneth D. Merry 	sc = ifp->if_softc;
1647*931eeffaSKenneth D. Merry 
1648*931eeffaSKenneth D. Merry 	if (!netfront_carrier_ok(sc))
1649*931eeffaSKenneth D. Merry 		return;
1650*931eeffaSKenneth D. Merry 
1651*931eeffaSKenneth D. Merry 	/*
1652*931eeffaSKenneth D. Merry 	 * While we have enough transmit slots available for at least one
1653*931eeffaSKenneth D. Merry 	 * maximum-sized packet, pull mbufs off the queue and put them on
1654*931eeffaSKenneth D. Merry 	 * the transmit ring.
1655*931eeffaSKenneth D. Merry 	 */
1656*931eeffaSKenneth D. Merry 	while (xn_tx_slot_available(sc)) {
1657*931eeffaSKenneth D. Merry 		IF_DEQUEUE(&ifp->if_snd, m_head);
1658*931eeffaSKenneth D. Merry 		if (m_head == NULL)
1659*931eeffaSKenneth D. Merry 			break;
1660*931eeffaSKenneth D. Merry 
1661*931eeffaSKenneth D. Merry 		if (xn_assemble_tx_request(sc, m_head) != 0)
1662*931eeffaSKenneth D. Merry 			break;
1663*931eeffaSKenneth D. Merry 	}
1664*931eeffaSKenneth D. Merry 
166589e0f4d2SKip Macy 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
166689e0f4d2SKip Macy 	if (notify)
166789e0f4d2SKip Macy 		notify_remote_via_irq(sc->irq);
166889e0f4d2SKip Macy 
166989e0f4d2SKip Macy 	if (RING_FULL(&sc->tx)) {
167089e0f4d2SKip Macy 		sc->tx_full = 1;
167189e0f4d2SKip Macy #if 0
167289e0f4d2SKip Macy 		netif_stop_queue(dev);
167389e0f4d2SKip Macy #endif
167489e0f4d2SKip Macy 	}
167589e0f4d2SKip Macy }
167689e0f4d2SKip Macy 
1677*931eeffaSKenneth D. Merry 
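/* ifnet if_start entry point: serialize on the TX lock and drain if_snd. */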
167889e0f4d2SKip Macy static void
167989e0f4d2SKip Macy xn_start(struct ifnet *ifp)
168089e0f4d2SKip Macy {
168189e0f4d2SKip Macy 	struct netfront_info *sc;
168289e0f4d2SKip Macy 	sc = ifp->if_softc;
168389e0f4d2SKip Macy 	XN_TX_LOCK(sc);
168489e0f4d2SKip Macy 	xn_start_locked(ifp);
168589e0f4d2SKip Macy 	XN_TX_UNLOCK(sc);
168689e0f4d2SKip Macy }
168789e0f4d2SKip Macy 
168889e0f4d2SKip Macy /* equivalent of network_open() in Linux */
168989e0f4d2SKip Macy static void
169089e0f4d2SKip Macy xn_ifinit_locked(struct netfront_info *sc)
169189e0f4d2SKip Macy {
169289e0f4d2SKip Macy 	struct ifnet *ifp;
169389e0f4d2SKip Macy 
169489e0f4d2SKip Macy 	XN_LOCK_ASSERT(sc);
169589e0f4d2SKip Macy 
169689e0f4d2SKip Macy 	ifp = sc->xn_ifp;
169789e0f4d2SKip Macy 
169889e0f4d2SKip Macy 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
169989e0f4d2SKip Macy 		return;
170089e0f4d2SKip Macy 
170189e0f4d2SKip Macy 	xn_stop(sc);
170289e0f4d2SKip Macy 
170389e0f4d2SKip Macy 	network_alloc_rx_buffers(sc);
170489e0f4d2SKip Macy 	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;
170589e0f4d2SKip Macy 
170689e0f4d2SKip Macy 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
170789e0f4d2SKip Macy 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
17080e509842SJustin T. Gibbs 	if_link_state_change(ifp, LINK_STATE_UP);
170989e0f4d2SKip Macy 
171089e0f4d2SKip Macy 	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
171189e0f4d2SKip Macy 
171289e0f4d2SKip Macy }
171389e0f4d2SKip Macy 
171489e0f4d2SKip Macy 
171589e0f4d2SKip Macy static void
171689e0f4d2SKip Macy xn_ifinit(void *xsc)
171789e0f4d2SKip Macy {
171889e0f4d2SKip Macy 	struct netfront_info *sc = xsc;
171989e0f4d2SKip Macy 
172089e0f4d2SKip Macy 	XN_LOCK(sc);
172189e0f4d2SKip Macy 	xn_ifinit_locked(sc);
172289e0f4d2SKip Macy 	XN_UNLOCK(sc);
172389e0f4d2SKip Macy 
172489e0f4d2SKip Macy }
172589e0f4d2SKip Macy 
172689e0f4d2SKip Macy 
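/*
 * Interface ioctl handler: address assignment, MTU changes, interface
 * flags, capability toggles (checksum offload, TSO, LRO) and media
 * requests are handled here; anything else falls through to
 * ether_ioctl().
 */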
172789e0f4d2SKip Macy static int
172889e0f4d2SKip Macy xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
172989e0f4d2SKip Macy {
173089e0f4d2SKip Macy 	struct netfront_info *sc = ifp->if_softc;
173189e0f4d2SKip Macy 	struct ifreq *ifr = (struct ifreq *) data;
173289e0f4d2SKip Macy 	struct ifaddr *ifa = (struct ifaddr *)data;
173389e0f4d2SKip Macy 
173489e0f4d2SKip Macy 	int mask, error = 0;
173589e0f4d2SKip Macy 	switch (cmd) {
173689e0f4d2SKip Macy 	case SIOCSIFADDR:
173789e0f4d2SKip Macy 	case SIOCGIFADDR:
173889e0f4d2SKip Macy 		XN_LOCK(sc);
173989e0f4d2SKip Macy 		if (ifa->ifa_addr->sa_family == AF_INET) {
174089e0f4d2SKip Macy 			ifp->if_flags |= IFF_UP;
174189e0f4d2SKip Macy 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
174289e0f4d2SKip Macy 				xn_ifinit_locked(sc);
174389e0f4d2SKip Macy 			arp_ifinit(ifp, ifa);
174489e0f4d2SKip Macy 			XN_UNLOCK(sc);
174549906218SDoug Rabson 		} else {
174649906218SDoug Rabson 			XN_UNLOCK(sc);
174749906218SDoug Rabson 			error = ether_ioctl(ifp, cmd, data);
174849906218SDoug Rabson 		}
174989e0f4d2SKip Macy 		break;
175089e0f4d2SKip Macy 	case SIOCSIFMTU:
175189e0f4d2SKip Macy 		/* XXX can we alter the MTU on a VN? */
175289e0f4d2SKip Macy #ifdef notyet
175389e0f4d2SKip Macy 		if (ifr->ifr_mtu > XN_JUMBO_MTU)
175489e0f4d2SKip Macy 			error = EINVAL;
175589e0f4d2SKip Macy 		else
175689e0f4d2SKip Macy #endif
175789e0f4d2SKip Macy 		{
175889e0f4d2SKip Macy 			ifp->if_mtu = ifr->ifr_mtu;
175989e0f4d2SKip Macy 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
176089e0f4d2SKip Macy 			xn_ifinit(sc);
176189e0f4d2SKip Macy 		}
176289e0f4d2SKip Macy 		break;
176389e0f4d2SKip Macy 	case SIOCSIFFLAGS:
176489e0f4d2SKip Macy 		XN_LOCK(sc);
176589e0f4d2SKip Macy 		if (ifp->if_flags & IFF_UP) {
176689e0f4d2SKip Macy 			/*
176789e0f4d2SKip Macy 			 * If only the state of the PROMISC flag changed,
176889e0f4d2SKip Macy 			 * then just use the 'set promisc mode' command
176989e0f4d2SKip Macy 			 * instead of reinitializing the entire NIC. Doing
177089e0f4d2SKip Macy 			 * a full re-init means reloading the firmware and
177189e0f4d2SKip Macy 			 * waiting for it to start up, which may take a
177289e0f4d2SKip Macy 			 * second or two.
177389e0f4d2SKip Macy 			 */
177489e0f4d2SKip Macy #ifdef notyet
177589e0f4d2SKip Macy 			/* No promiscuous mode with Xen */
177689e0f4d2SKip Macy 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
177789e0f4d2SKip Macy 			    ifp->if_flags & IFF_PROMISC &&
177889e0f4d2SKip Macy 			    !(sc->xn_if_flags & IFF_PROMISC)) {
177989e0f4d2SKip Macy 				XN_SETBIT(sc, XN_RX_MODE,
178089e0f4d2SKip Macy 					  XN_RXMODE_RX_PROMISC);
178189e0f4d2SKip Macy 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
178289e0f4d2SKip Macy 				   !(ifp->if_flags & IFF_PROMISC) &&
178389e0f4d2SKip Macy 				   sc->xn_if_flags & IFF_PROMISC) {
178489e0f4d2SKip Macy 				XN_CLRBIT(sc, XN_RX_MODE,
178589e0f4d2SKip Macy 					  XN_RXMODE_RX_PROMISC);
178689e0f4d2SKip Macy 			} else
178789e0f4d2SKip Macy #endif
178889e0f4d2SKip Macy 				xn_ifinit_locked(sc);
178989e0f4d2SKip Macy 		} else {
179089e0f4d2SKip Macy 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
179189e0f4d2SKip Macy 				xn_stop(sc);
179289e0f4d2SKip Macy 			}
179389e0f4d2SKip Macy 		}
179489e0f4d2SKip Macy 		sc->xn_if_flags = ifp->if_flags;
179589e0f4d2SKip Macy 		XN_UNLOCK(sc);
179689e0f4d2SKip Macy 		error = 0;
179789e0f4d2SKip Macy 		break;
179889e0f4d2SKip Macy 	case SIOCSIFCAP:
179989e0f4d2SKip Macy 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
180012678024SDoug Rabson 		if (mask & IFCAP_TXCSUM) {
180112678024SDoug Rabson 			if (IFCAP_TXCSUM & ifp->if_capenable) {
180212678024SDoug Rabson 				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
180312678024SDoug Rabson 				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
180412678024SDoug Rabson 				    | CSUM_IP | CSUM_TSO);
180512678024SDoug Rabson 			} else {
180612678024SDoug Rabson 				ifp->if_capenable |= IFCAP_TXCSUM;
180712678024SDoug Rabson 				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
180812678024SDoug Rabson 				    | CSUM_IP);
180989e0f4d2SKip Macy 			}
181012678024SDoug Rabson 		}
181112678024SDoug Rabson 		if (mask & IFCAP_RXCSUM) {
181212678024SDoug Rabson 			ifp->if_capenable ^= IFCAP_RXCSUM;
181312678024SDoug Rabson 		}
181412678024SDoug Rabson #if __FreeBSD_version >= 700000
181512678024SDoug Rabson 		if (mask & IFCAP_TSO4) {
181612678024SDoug Rabson 			if (IFCAP_TSO4 & ifp->if_capenable) {
181712678024SDoug Rabson 				ifp->if_capenable &= ~IFCAP_TSO4;
181812678024SDoug Rabson 				ifp->if_hwassist &= ~CSUM_TSO;
181912678024SDoug Rabson 			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
182012678024SDoug Rabson 				ifp->if_capenable |= IFCAP_TSO4;
182112678024SDoug Rabson 				ifp->if_hwassist |= CSUM_TSO;
182212678024SDoug Rabson 			} else {
18233552092bSAdrian Chadd 				IPRINTK("Xen requires tx checksum offload"
182412678024SDoug Rabson 				    " be enabled to use TSO\n");
182512678024SDoug Rabson 				error = EINVAL;
182612678024SDoug Rabson 			}
182712678024SDoug Rabson 		}
182812678024SDoug Rabson 		if (mask & IFCAP_LRO) {
182912678024SDoug Rabson 			ifp->if_capenable ^= IFCAP_LRO;
183112678024SDoug Rabson 		}
183212678024SDoug Rabson #endif
183389e0f4d2SKip Macy 		error = 0;
183489e0f4d2SKip Macy 		break;
183589e0f4d2SKip Macy 	case SIOCADDMULTI:
183689e0f4d2SKip Macy 	case SIOCDELMULTI:
183789e0f4d2SKip Macy #ifdef notyet
183889e0f4d2SKip Macy 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
183989e0f4d2SKip Macy 			XN_LOCK(sc);
184089e0f4d2SKip Macy 			xn_setmulti(sc);
184189e0f4d2SKip Macy 			XN_UNLOCK(sc);
184289e0f4d2SKip Macy 			error = 0;
184389e0f4d2SKip Macy 		}
184489e0f4d2SKip Macy #endif
184589e0f4d2SKip Macy 		/* FALLTHROUGH */
184689e0f4d2SKip Macy 	case SIOCSIFMEDIA:
184789e0f4d2SKip Macy 	case SIOCGIFMEDIA:
18480e509842SJustin T. Gibbs 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
184989e0f4d2SKip Macy 		break;
185089e0f4d2SKip Macy 	default:
185189e0f4d2SKip Macy 		error = ether_ioctl(ifp, cmd, data);
185289e0f4d2SKip Macy 	}
185389e0f4d2SKip Macy 
185489e0f4d2SKip Macy 	return (error);
185589e0f4d2SKip Macy }
185689e0f4d2SKip Macy 
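/*
 * Stop the interface: cancel the periodic tick callout, release the RX
 * and TX ring resources, clear RUNNING/OACTIVE and mark the link down.
 * Called with the driver lock held.
 */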
185789e0f4d2SKip Macy static void
185889e0f4d2SKip Macy xn_stop(struct netfront_info *sc)
185989e0f4d2SKip Macy {
186089e0f4d2SKip Macy 	struct ifnet *ifp;
186189e0f4d2SKip Macy 
186289e0f4d2SKip Macy 	XN_LOCK_ASSERT(sc);
186389e0f4d2SKip Macy 
186489e0f4d2SKip Macy 	ifp = sc->xn_ifp;
186589e0f4d2SKip Macy 
186689e0f4d2SKip Macy 	callout_stop(&sc->xn_stat_ch);
186789e0f4d2SKip Macy 
186889e0f4d2SKip Macy 	xn_free_rx_ring(sc);
186989e0f4d2SKip Macy 	xn_free_tx_ring(sc);
187089e0f4d2SKip Macy 
187189e0f4d2SKip Macy 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
18720e509842SJustin T. Gibbs 	if_link_state_change(ifp, LINK_STATE_DOWN);
187389e0f4d2SKip Macy }
187489e0f4d2SKip Macy 
187589e0f4d2SKip Macy /* START of Xenolinux helper functions adapted to FreeBSD */
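/*
 * (Re)connect to the backend: read the feature-rx-copy/feature-rx-flip
 * nodes from the xenstore to pick the receive mode, renegotiate the
 * rings with the backend, release stale TX buffers, rebuild the RX ring
 * from any surviving mbufs (re-granting each one), and finally mark the
 * carrier up and kick the backend.
 */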
187623dc5621SKip Macy int
187723dc5621SKip Macy network_connect(struct netfront_info *np)
187889e0f4d2SKip Macy {
18793a6d1fcfSKip Macy 	int i, requeue_idx, error;
188089e0f4d2SKip Macy 	grant_ref_t ref;
188189e0f4d2SKip Macy 	netif_rx_request_t *req;
188289e0f4d2SKip Macy 	u_int feature_rx_copy, feature_rx_flip;
188389e0f4d2SKip Macy 
18843a6d1fcfSKip Macy 	error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev),
18853a6d1fcfSKip Macy 	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
18863a6d1fcfSKip Macy 	if (error)
188789e0f4d2SKip Macy 		feature_rx_copy = 0;
18883a6d1fcfSKip Macy 	error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev),
18893a6d1fcfSKip Macy 	    "feature-rx-flip", NULL, "%u", &feature_rx_flip);
18903a6d1fcfSKip Macy 	if (error)
189189e0f4d2SKip Macy 		feature_rx_flip = 1;
189289e0f4d2SKip Macy 
189389e0f4d2SKip Macy 	/*
189489e0f4d2SKip Macy 	 * Copy packets on receive path if:
189589e0f4d2SKip Macy 	 *  (a) This was requested by user, and the backend supports it; or
189689e0f4d2SKip Macy 	 *  (b) Flipping was requested, but this is unsupported by the backend.
189789e0f4d2SKip Macy 	 */
189889e0f4d2SKip Macy 	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
189989e0f4d2SKip Macy 				(MODPARM_rx_flip && !feature_rx_flip));
190089e0f4d2SKip Macy 
190189e0f4d2SKip Macy 	/* Recovery procedure: */
19023a6d1fcfSKip Macy 	error = talk_to_backend(np->xbdev, np);
19033a6d1fcfSKip Macy 	if (error)
19043a6d1fcfSKip Macy 		return (error);
190589e0f4d2SKip Macy 
190689e0f4d2SKip Macy 	/* Step 1: Reinitialise variables. */
190789e0f4d2SKip Macy 	netif_release_tx_bufs(np);
190889e0f4d2SKip Macy 
190989e0f4d2SKip Macy 	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
191089e0f4d2SKip Macy 	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
191189e0f4d2SKip Macy 		struct mbuf *m;
19123a6d1fcfSKip Macy 		u_long pfn;
191389e0f4d2SKip Macy 
191489e0f4d2SKip Macy 		if (np->rx_mbufs[i] == NULL)
191589e0f4d2SKip Macy 			continue;
191689e0f4d2SKip Macy 
191789e0f4d2SKip Macy 		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
191889e0f4d2SKip Macy 		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1919*931eeffaSKenneth D. Merry 
192089e0f4d2SKip Macy 		req = RING_GET_REQUEST(&np->rx, requeue_idx);
19213a6d1fcfSKip Macy 		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
192289e0f4d2SKip Macy 
192389e0f4d2SKip Macy 		if (!np->copying_receiver) {
192489e0f4d2SKip Macy 			gnttab_grant_foreign_transfer_ref(ref,
192523dc5621SKip Macy 			    xenbus_get_otherend_id(np->xbdev),
19263a6d1fcfSKip Macy 			    pfn);
192789e0f4d2SKip Macy 		} else {
192889e0f4d2SKip Macy 			gnttab_grant_foreign_access_ref(ref,
192923dc5621SKip Macy 			    xenbus_get_otherend_id(np->xbdev),
19303a6d1fcfSKip Macy 			    PFNTOMFN(pfn), 0);
193189e0f4d2SKip Macy 		}
193289e0f4d2SKip Macy 		req->gref = ref;
193389e0f4d2SKip Macy 		req->id   = requeue_idx;
193489e0f4d2SKip Macy 
193589e0f4d2SKip Macy 		requeue_idx++;
193689e0f4d2SKip Macy 	}
193789e0f4d2SKip Macy 
193889e0f4d2SKip Macy 	np->rx.req_prod_pvt = requeue_idx;
193989e0f4d2SKip Macy 
194089e0f4d2SKip Macy 	/* Step 3: All public and private state should now be sane.  Get
194189e0f4d2SKip Macy 	 * ready to start sending and receiving packets and give the driver
194289e0f4d2SKip Macy 	 * domain a kick because we've probably just requeued some
194389e0f4d2SKip Macy 	 * packets.
194489e0f4d2SKip Macy 	 */
194589e0f4d2SKip Macy 	netfront_carrier_on(np);
194689e0f4d2SKip Macy 	notify_remote_via_irq(np->irq);
194789e0f4d2SKip Macy 	XN_TX_LOCK(np);
194889e0f4d2SKip Macy 	xn_txeof(np);
194989e0f4d2SKip Macy 	XN_TX_UNLOCK(np);
195089e0f4d2SKip Macy 	network_alloc_rx_buffers(np);
195189e0f4d2SKip Macy 
195289e0f4d2SKip Macy 	return (0);
195389e0f4d2SKip Macy }
195489e0f4d2SKip Macy 
195589e0f4d2SKip Macy static void
195689e0f4d2SKip Macy show_device(struct netfront_info *sc)
195789e0f4d2SKip Macy {
195889e0f4d2SKip Macy #ifdef DEBUG
195989e0f4d2SKip Macy 	if (sc) {
196089e0f4d2SKip Macy 		IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
196189e0f4d2SKip Macy 			sc->xn_ifno,
196289e0f4d2SKip Macy 			be_state_name[sc->xn_backend_state],
196389e0f4d2SKip Macy 			sc->xn_user_state ? "open" : "closed",
196489e0f4d2SKip Macy 			sc->xn_evtchn,
196589e0f4d2SKip Macy 			sc->xn_irq,
196689e0f4d2SKip Macy 			sc->xn_tx_if,
196789e0f4d2SKip Macy 			sc->xn_rx_if);
196889e0f4d2SKip Macy 	} else {
196989e0f4d2SKip Macy 		IPRINTK("<vif NULL>\n");
197089e0f4d2SKip Macy 	}
197189e0f4d2SKip Macy #endif
197289e0f4d2SKip Macy }
197389e0f4d2SKip Macy 
197489e0f4d2SKip Macy /** Create a network device.
197589e0f4d2SKip Macy  * @param dev  The newbus device for this vif instance.
197689e0f4d2SKip Macy  */
197723dc5621SKip Macy int
197823dc5621SKip Macy create_netdev(device_t dev)
197989e0f4d2SKip Macy {
198089e0f4d2SKip Macy 	int i;
198189e0f4d2SKip Macy 	struct netfront_info *np;
198289e0f4d2SKip Macy 	int err;
198389e0f4d2SKip Macy 	struct ifnet *ifp;
198489e0f4d2SKip Macy 
198523dc5621SKip Macy 	np = device_get_softc(dev);
198689e0f4d2SKip Macy 
198789e0f4d2SKip Macy 	np->xbdev         = dev;
198889e0f4d2SKip Macy 
198989e0f4d2SKip Macy 	XN_LOCK_INIT(np, xennetif);
19900e509842SJustin T. Gibbs 
19910e509842SJustin T. Gibbs 	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
19920e509842SJustin T. Gibbs 	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
19930e509842SJustin T. Gibbs 	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);
19940e509842SJustin T. Gibbs 
199589e0f4d2SKip Macy 	np->rx_target     = RX_MIN_TARGET;
199689e0f4d2SKip Macy 	np->rx_min_target = RX_MIN_TARGET;
199789e0f4d2SKip Macy 	np->rx_max_target = RX_MAX_TARGET;
199889e0f4d2SKip Macy 
199989e0f4d2SKip Macy 	/* Initialise {tx,rx}_mbufs to be a free chain containing every entry. */
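	/*
	 * The TX free list is threaded through tx_mbufs[] itself: a free
	 * entry stores the index of the next free entry cast to a pointer,
	 * and the final entry stores 0 to terminate the chain (see
	 * get_id_from_freelist()/add_id_to_freelist()).  Because these
	 * values are small integers, the TX completion path can tell a
	 * free-list link apart from a genuine mbuf pointer.
	 */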
200089e0f4d2SKip Macy 	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
200189e0f4d2SKip Macy 		np->tx_mbufs[i] = (void *) ((u_long) i+1);
200289e0f4d2SKip Macy 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
200389e0f4d2SKip Macy 	}
2004*931eeffaSKenneth D. Merry 	np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;
2005*931eeffaSKenneth D. Merry 
200689e0f4d2SKip Macy 	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
200889e0f4d2SKip Macy 		np->rx_mbufs[i] = NULL;
200989e0f4d2SKip Macy 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
201089e0f4d2SKip Macy 	}
201189e0f4d2SKip Macy 	/* A grant for every tx ring slot */
2012*931eeffaSKenneth D. Merry 	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2013*931eeffaSKenneth D. Merry 					  &np->gref_tx_head) != 0) {
2014227ca257SKip Macy 		IPRINTK("#### netfront can't alloc tx grant refs\n");
201589e0f4d2SKip Macy 		err = ENOMEM;
201689e0f4d2SKip Macy 		goto exit;
201789e0f4d2SKip Macy 	}
201889e0f4d2SKip Macy 	/* A grant for every rx ring slot */
201989e0f4d2SKip Macy 	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
2020*931eeffaSKenneth D. Merry 					  &np->gref_rx_head) != 0) {
2021227ca257SKip Macy 		WPRINTK("#### netfront can't alloc rx grant refs\n");
202289e0f4d2SKip Macy 		gnttab_free_grant_references(np->gref_tx_head);
202389e0f4d2SKip Macy 		err = ENOMEM;
202489e0f4d2SKip Macy 		goto exit;
202589e0f4d2SKip Macy 	}
202689e0f4d2SKip Macy 
202789e0f4d2SKip Macy 	err = xen_net_read_mac(dev, np->mac);
202889e0f4d2SKip Macy 	if (err) {
202923dc5621SKip Macy 		xenbus_dev_fatal(dev, err, "parsing %s/mac",
203023dc5621SKip Macy 		    xenbus_get_node(dev));
203189e0f4d2SKip Macy 		goto out;
203289e0f4d2SKip Macy 	}
203389e0f4d2SKip Macy 
203489e0f4d2SKip Macy 	/* Set up ifnet structure */
203523dc5621SKip Macy 	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
203689e0f4d2SKip Macy     	ifp->if_softc = np;
203723dc5621SKip Macy     	if_initname(ifp, "xn",  device_get_unit(dev));
20383a6d1fcfSKip Macy     	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
203989e0f4d2SKip Macy     	ifp->if_ioctl = xn_ioctl;
204089e0f4d2SKip Macy     	ifp->if_output = ether_output;
204189e0f4d2SKip Macy     	ifp->if_start = xn_start;
2042227ca257SKip Macy #ifdef notyet
2043227ca257SKip Macy     	ifp->if_watchdog = xn_watchdog;
2044227ca257SKip Macy #endif
204589e0f4d2SKip Macy     	ifp->if_init = xn_ifinit;
204689e0f4d2SKip Macy     	ifp->if_mtu = ETHERMTU;
204789e0f4d2SKip Macy     	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;
204889e0f4d2SKip Macy 
204989e0f4d2SKip Macy     	ifp->if_hwassist = XN_CSUM_FEATURES;
205089e0f4d2SKip Macy     	ifp->if_capabilities = IFCAP_HWCSUM;
205112678024SDoug Rabson #if __FreeBSD_version >= 700000
2052*931eeffaSKenneth D. Merry 	ifp->if_capabilities |= IFCAP_TSO4;
205312678024SDoug Rabson 	if (xn_enable_lro) {
205412678024SDoug Rabson 		int err = tcp_lro_init(&np->xn_lro);
205512678024SDoug Rabson 		if (err) {
205612678024SDoug Rabson 			device_printf(dev, "LRO initialization failed\n");
205712678024SDoug Rabson 			goto exit;
205812678024SDoug Rabson 		}
205912678024SDoug Rabson 		np->xn_lro.ifp = ifp;
206012678024SDoug Rabson 		ifp->if_capabilities |= IFCAP_LRO;
206112678024SDoug Rabson 	}
206289e0f4d2SKip Macy #endif
206312678024SDoug Rabson     	ifp->if_capenable = ifp->if_capabilities;
206489e0f4d2SKip Macy 
206589e0f4d2SKip Macy     	ether_ifattach(ifp, np->mac);
206689e0f4d2SKip Macy     	callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
206789e0f4d2SKip Macy 	netfront_carrier_off(np);
206889e0f4d2SKip Macy 
206989e0f4d2SKip Macy 	return (0);
207089e0f4d2SKip Macy 
207189e0f4d2SKip Macy exit:
207289e0f4d2SKip Macy 	gnttab_free_grant_references(np->gref_tx_head);
207389e0f4d2SKip Macy out:
207489e0f4d2SKip Macy 	panic("do something smart");
207589e0f4d2SKip Macy 
207689e0f4d2SKip Macy }
207789e0f4d2SKip Macy 
207889e0f4d2SKip Macy /**
207989e0f4d2SKip Macy  * Handle the change of state of the backend to Closing.  We must delete our
208089e0f4d2SKip Macy  * device-layer structures now, to ensure that writes are flushed through to
208189e0f4d2SKip Macy  * the backend.  Once this is done, we can switch to Closed in
208289e0f4d2SKip Macy  * acknowledgement.
208389e0f4d2SKip Macy  */
208489e0f4d2SKip Macy #if 0
20850e509842SJustin T. Gibbs static void
20860e509842SJustin T. Gibbs netfront_closing(device_t dev)
208789e0f4d2SKip Macy {
208889e0f4d2SKip Macy #if 0
208989e0f4d2SKip Macy 	struct netfront_info *info = dev->dev_driver_data;
209089e0f4d2SKip Macy 
209189e0f4d2SKip Macy 	DPRINTK("netfront_closing: %s removed\n", dev->nodename);
209289e0f4d2SKip Macy 
209389e0f4d2SKip Macy 	close_netdev(info);
209489e0f4d2SKip Macy #endif
209589e0f4d2SKip Macy 	xenbus_switch_state(dev, XenbusStateClosed);
209689e0f4d2SKip Macy }
209789e0f4d2SKip Macy #endif
209889e0f4d2SKip Macy 
20990e509842SJustin T. Gibbs static int
21000e509842SJustin T. Gibbs netfront_detach(device_t dev)
210189e0f4d2SKip Macy {
210223dc5621SKip Macy 	struct netfront_info *info = device_get_softc(dev);
210389e0f4d2SKip Macy 
210423dc5621SKip Macy 	DPRINTK("%s\n", xenbus_get_node(dev));
210589e0f4d2SKip Macy 
210689e0f4d2SKip Macy 	netif_free(info);
210789e0f4d2SKip Macy 
210889e0f4d2SKip Macy 	return (0);
210989e0f4d2SKip Macy }
211089e0f4d2SKip Macy 
21110e509842SJustin T. Gibbs static void
21120e509842SJustin T. Gibbs netif_free(struct netfront_info *info)
211389e0f4d2SKip Macy {
211489e0f4d2SKip Macy 	netif_disconnect_backend(info);
211589e0f4d2SKip Macy #if 0
211689e0f4d2SKip Macy 	close_netdev(info);
211789e0f4d2SKip Macy #endif
211889e0f4d2SKip Macy }
211989e0f4d2SKip Macy 
21200e509842SJustin T. Gibbs static void
21210e509842SJustin T. Gibbs netif_disconnect_backend(struct netfront_info *info)
212289e0f4d2SKip Macy {
21233a6d1fcfSKip Macy 	XN_RX_LOCK(info);
21243a6d1fcfSKip Macy 	XN_TX_LOCK(info);
21253a6d1fcfSKip Macy 	netfront_carrier_off(info);
21263a6d1fcfSKip Macy 	XN_TX_UNLOCK(info);
21273a6d1fcfSKip Macy 	XN_RX_UNLOCK(info);
21283a6d1fcfSKip Macy 
212989e0f4d2SKip Macy 	end_access(info->tx_ring_ref, info->tx.sring);
213089e0f4d2SKip Macy 	end_access(info->rx_ring_ref, info->rx.sring);
213189e0f4d2SKip Macy 	info->tx_ring_ref = GRANT_INVALID_REF;
213289e0f4d2SKip Macy 	info->rx_ring_ref = GRANT_INVALID_REF;
213389e0f4d2SKip Macy 	info->tx.sring = NULL;
213489e0f4d2SKip Macy 	info->rx.sring = NULL;
213589e0f4d2SKip Macy 
213689e0f4d2SKip Macy 	if (info->irq)
21373a6d1fcfSKip Macy 		unbind_from_irqhandler(info->irq);
21383a6d1fcfSKip Macy 
213989e0f4d2SKip Macy 	info->irq = 0;
214089e0f4d2SKip Macy }
214189e0f4d2SKip Macy 
214289e0f4d2SKip Macy 
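/*
 * Terminate foreign access to a shared ring page, but only if the grant
 * reference is still valid.
 */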
21430e509842SJustin T. Gibbs static void
21440e509842SJustin T. Gibbs end_access(int ref, void *page)
214589e0f4d2SKip Macy {
214689e0f4d2SKip Macy 	if (ref != GRANT_INVALID_REF)
2147920ba15bSKip Macy 		gnttab_end_foreign_access(ref, page);
214889e0f4d2SKip Macy }
214989e0f4d2SKip Macy 
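/*
 * ifmedia handlers: the paravirtualized interface has no selectable
 * media, so report a single fixed ETHER/MANUAL medium that is always
 * active.
 */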
21500e509842SJustin T. Gibbs static int
21510e509842SJustin T. Gibbs xn_ifmedia_upd(struct ifnet *ifp)
21520e509842SJustin T. Gibbs {
21530e509842SJustin T. Gibbs 	return (0);
21540e509842SJustin T. Gibbs }
21550e509842SJustin T. Gibbs 
21560e509842SJustin T. Gibbs static void
21570e509842SJustin T. Gibbs xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
21580e509842SJustin T. Gibbs {
21590e509842SJustin T. Gibbs 	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
21600e509842SJustin T. Gibbs 	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
21610e509842SJustin T. Gibbs }
21620e509842SJustin T. Gibbs 
216389e0f4d2SKip Macy /* ** Driver registration ** */
216423dc5621SKip Macy static device_method_t netfront_methods[] = {
216523dc5621SKip Macy 	/* Device interface */
216623dc5621SKip Macy 	DEVMETHOD(device_probe,         netfront_probe),
216723dc5621SKip Macy 	DEVMETHOD(device_attach,        netfront_attach),
216823dc5621SKip Macy 	DEVMETHOD(device_detach,        netfront_detach),
216923dc5621SKip Macy 	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
217023dc5621SKip Macy 	DEVMETHOD(device_suspend,       bus_generic_suspend),
217123dc5621SKip Macy 	DEVMETHOD(device_resume,        netfront_resume),
217289e0f4d2SKip Macy 
217323dc5621SKip Macy 	/* Xenbus interface */
217423dc5621SKip Macy 	DEVMETHOD(xenbus_backend_changed, netfront_backend_changed),
217589e0f4d2SKip Macy 
217623dc5621SKip Macy 	{ 0, 0 }
217789e0f4d2SKip Macy };
217889e0f4d2SKip Macy 
217923dc5621SKip Macy static driver_t netfront_driver = {
218023dc5621SKip Macy 	"xn",
218123dc5621SKip Macy 	netfront_methods,
218223dc5621SKip Macy 	sizeof(struct netfront_info),
218389e0f4d2SKip Macy };
218423dc5621SKip Macy devclass_t netfront_devclass;
218589e0f4d2SKip Macy 
218623dc5621SKip Macy DRIVER_MODULE(xe, xenbus, netfront_driver, netfront_devclass, 0, 0);
2187