/*
 *
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenfunc.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <dev/xen/netfront/mbufq.h>

#include "xenbus_if.h"

#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP | CSUM_TSO)

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

#if __FreeBSD_version >= 700000
/*
 * Should the driver do LRO on the RX end
 *  this can be toggled on the fly, but the
 *  interface must be reset (down/up) for it
 *  to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
#else

#define IFCAP_TSO4 0
#define CSUM_TSO 0

#endif

#ifdef CONFIG_XEN
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif

#define MAX_SKB_FRAGS	(65536/PAGE_SIZE + 2)
#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

static void show_device(struct netfront_info *sc);
#ifdef notyet
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void end_access(int ref, void *page);

/* Xenolinux helper functions */
int  network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int  xennet_get_responses(struct netfront_info *np,
	struct netfront_rx_info *rinfo, RING_IDX rp, struct mbuf **list,
	int *pages_flipped_p);

#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)

/*
 * Mbuf pointers. We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
	struct mbuf	*xn_tx_chain[NET_TX_RING_SIZE+1];
	int		xn_tx_chain_cnt;
	struct mbuf	*xn_rx_chain[NET_RX_RING_SIZE+1];
};

struct net_device_stats
{
	u_long	rx_packets;		/* total packets received	*/
	u_long	tx_packets;		/* total packets transmitted	*/
	u_long	rx_bytes;		/* total bytes received		*/
	u_long	tx_bytes;		/* total bytes transmitted	*/
	u_long	rx_errors;		/* bad packets received		*/
	u_long	tx_errors;		/* packet transmit problems	*/
	u_long	rx_dropped;		/* no space in linux buffers	*/
	u_long	tx_dropped;		/* no space available in linux	*/
	u_long	multicast;		/* multicast packets received	*/
	u_long	collisions;

	/* detailed rx_errors: */
	u_long	rx_length_errors;
	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
	u_long	rx_crc_errors;		/* recved pkt with crc error	*/
	u_long	rx_frame_errors;	/* recv'd frame alignment error	*/
	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	u_long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	u_long	tx_aborted_errors;
	u_long	tx_carrier_errors;
	u_long	tx_fifo_errors;
	u_long	tx_heartbeat_errors;
	u_long	tx_window_errors;

	/* for cslip etc */
	u_long	rx_compressed;
	u_long	tx_compressed;
};
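/*
 * Per-interface softc.  It carries the ifnet pointer, the Linux-style
 * statistics block above, the shared tx/rx front rings, the locks that
 * protect them, the grant references for every outstanding tx/rx buffer,
 * and the mbuf pointer arrays (xn_cdata) that shadow the ring entries.
 */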

struct netfront_info {

	struct ifnet *xn_ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl xn_lro;
#endif

	struct net_device_stats stats;
	u_int tx_full;

	netif_tx_front_ring_t tx;
	netif_rx_front_ring_t rx;

	struct mtx   tx_lock;
	struct mtx   rx_lock;
	struct sx    sc_lock;

	u_int handle;
	u_int irq;
	u_int copying_receiver;
	u_int carrier;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target, rx_max_target, rx_target;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
	 * array is an index into a chain of free entries.
	 */

	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];

#define TX_MAX_TARGET min(NET_RX_RING_SIZE, 256)
	device_t xbdev;
	int tx_ring_ref;
	int rx_ring_ref;
	uint8_t mac[ETHER_ADDR_LEN];
	struct xn_chain_data	xn_cdata;	/* mbufs */
	struct mbuf_head xn_rx_batch;	/* head of the batch queue */

	int xn_if_flags;
	struct callout	xn_stat_ch;

	u_long rx_pfn_array[NET_RX_RING_SIZE];
	multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
	mmu_update_t rx_mmu[NET_RX_RING_SIZE];
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_LOCK_INIT(_sc, _name) \
	mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
	mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
	sx_init(&(_sc)->sc_lock, #_name"_rx")

#define XN_RX_LOCK(_sc)           mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)         mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)           mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)         mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)              sx_xlock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)            sx_xunlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)       sx_assert(&(_sc)->sc_lock, SX_LOCKED);
#define XN_RX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->tx_lock, MA_OWNED);

#define XN_LOCK_DESTROY(_sc)   mtx_destroy(&(_sc)->rx_lock); \
                               mtx_destroy(&(_sc)->tx_lock); \
                               sx_destroy(&(_sc)->sc_lock);
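
/*
 * Locking summary for the macros above: tx_lock and rx_lock are MTX_DEF
 * mutexes guarding the transmit and receive rings respectively, and
 * sc_lock is an sx lock covering the remaining softc state; the *_ASSERT
 * macros document which lock a given function expects to hold.
 */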

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)

/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */


/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */

static inline void
add_id_to_freelist(struct mbuf **list, unsigned short id)
{
	KASSERT(id != 0, ("add_id_to_freelist: the head item (0) must always be free."));
	list[id] = list[0];
	list[0]  = (void *)(u_long)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	u_int id = (u_int)(u_long)list[0];
	KASSERT(id != 0, ("get_id_from_freelist: the head item (0) must always remain free."));
	list[0] = list[id];
	return (id);
}

static inline int
xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np,
		   RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct mbuf *m;

	m = np->rx_mbufs[i];
	np->rx_mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#if 0
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

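/*
 * Illustrative xenstore layout consumed by xen_net_read_mac() below
 * (the exact frontend path varies between toolstacks; shown only as an
 * example of the colon-separated format that is parsed):
 *
 *	device/vif/0/mac = "00:16:3e:12:34:56"
 */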
/**
 * Read the 'mac' node at the given device's node in the store, and parse that
 * as colon-separated octets, placing the result in the given mac array.  mac
 * must be a preallocated array of length ETH_ALEN (as declared in
 * linux/if_ether.h).  Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	int error, i;
	char *s, *e, *macstr;

	error = xenbus_read(XBT_NIL, xenbus_get_node(dev), "mac", NULL,
	    (void **) &macstr);
	if (error)
		return (error);

	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_DEVBUF);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_DEVBUF);
	return (0);
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}

	return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

#if __FreeBSD_version >= 700000
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
	    &xn_enable_lro, 0, "Large Receive Offload");
#endif

	return 0;
}


/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	netif_disconnect_backend(info);
	return (0);
}

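/*
 * Frontend half of the xenbus handshake.  In a single transaction
 * (retried on EAGAIN) talk_to_backend() below writes, under the device
 * node:
 *
 *	tx-ring-ref       grant reference of the shared tx ring page
 *	rx-ring-ref       grant reference of the shared rx ring page
 *	event-channel     port bound to xn_intr() in setup_device()
 *	request-rx-copy   1 to ask the backend to copy rx packets,
 *	                  0 to request page flipping
 *	feature-rx-notify, feature-sg, feature-gso-tcpv4 (FreeBSD >= 7)
 *	                  advertised as "1"
 */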
/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	const char *node = xenbus_get_node(dev);
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

 again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	err = xenbus_printf(xbt, node, "tx-ring-ref","%u",
	    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "rx-ring-ref","%u",
	    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node,
	    "event-channel", "%u", irq_to_evtchn_port(info->irq));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "request-rx-copy", "%u",
	    info->copying_receiver);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, node, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
#if __FreeBSD_version >= 700000
	err = xenbus_printf(xbt, node, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}
#endif

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}


static int
setup_device(device_t dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int error;
	struct ifnet *ifp;

	ifp = info->xn_ifp;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	info->irq = 0;

	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!txs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
	if (error)
		goto fail;

	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!rxs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
	if (error)
		goto fail;

	error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev),
	    "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq);

	if (error) {
		xenbus_dev_fatal(dev, error,
		    "bind_evtchn_to_irqhandler failed");
		goto fail;
	}

	show_device(info);

	return (0);

 fail:
	netif_free(info);
	return (error);
}

/**
 * If this interface has an ipv4 address, send an arp for it. This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
		}
	}
}

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (network_connect(sc) != 0)
			break;
		xenbus_set_state(dev, XenbusStateConnected);
		netfront_send_fake_arp(dev, sc);
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	}
}

static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		if (sc->xn_cdata.xn_rx_chain[i] != NULL) {
			m_freem(sc->xn_cdata.xn_rx_chain[i]);
			sc->xn_cdata.xn_rx_chain[i] = NULL;
		}
	}

	sc->rx.rsp_cons = 0;
	sc->xn_rx_if->req_prod = 0;
	sc->xn_rx_if->event = sc->rx.rsp_cons ;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		if (sc->xn_cdata.xn_tx_chain[i] != NULL) {
			m_freem(sc->xn_cdata.xn_tx_chain[i]);
			sc->xn_cdata.xn_tx_chain[i] = NULL;
		}
	}

	return;
#endif
}

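/*
 * Worked example of the slot check below (assuming 4 KB pages, for which
 * the netif rings hold 256 entries): TX_MAX_TARGET is min(256, 256) = 256,
 * so transmission stalls once 256 - 24 - 2 = 230 requests are outstanding,
 * keeping 26 slots in reserve for a maximally fragmented packet.
 */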
/*
 * Do some brief math on the number of descriptors available to
 * determine how many slots are available.
 *
 * Firstly - wouldn't something with RING_FREE_REQUESTS() be more applicable?
 * Secondly - MAX_SKB_FRAGS is a Linux construct which may not apply here.
 * Thirdly - it isn't used here anyway; the magic constant '24' is possibly
 *   wrong?
 * The "2" is presumably to ensure there are also enough slots available for
 * the ring entries used for "options" (eg, the TSO entry before a packet
 * is queued); I'm not sure why it's 2 and not 1.  Perhaps to make sure there's
 * a "free" node in the tx mbuf list (node 0) to represent the freelist?
 *
 * This only figures out whether any xenbus ring descriptors are available;
 * it doesn't at all reflect how many tx mbuf ring descriptors are also
 * available.
 */
static inline int
netfront_tx_slot_available(struct netfront_info *np)
{
	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
	    (TX_MAX_TARGET - /* MAX_SKB_FRAGS */ 24 - 2));
}
static void
netif_release_tx_bufs(struct netfront_info *np)
{
	struct mbuf *m;
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		m = np->xn_cdata.xn_tx_chain[i];

		if (((u_long)m) < KERNBASE)
			continue;
		gnttab_grant_foreign_access_ref(np->grant_tx_ref[i],
		    xenbus_get_otherend_id(np->xbdev),
		    virt_to_mfn(mtod(m, vm_offset_t)),
		    GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
		    np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(np->tx_mbufs, i);
		np->xn_cdata.xn_tx_chain_cnt--;
		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
			panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0");
		}
		m_freem(m);
	}
}

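/*
 * Receive buffers are handed to the backend in one of two ways, selected
 * by sc->copying_receiver: in copy mode each buffer page is granted to
 * the backend for access and the backend copies packet data into it; in
 * flip mode the page is granted for transfer, removed from our physmap
 * and returned to Xen, and a backend-owned page is flipped to us when
 * the packet arrives.
 */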
static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
	unsigned short id;
	struct mbuf *m_new;
	int i, batch_target, notify;
	RING_IDX req_prod;
	struct xen_memory_reservation reservation;
	grant_ref_t ref;
	int nr_flips;
	netif_rx_request_t *req;
	vm_offset_t vaddr;
	u_long pfn;

	req_prod = sc->rx.req_prod_pvt;

	if (unlikely(sc->carrier == 0))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation
	 * requests both for ourselves and for other kernel subsystems.
	 */
	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			goto no_mbuf;

		m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
		if ((m_new->m_flags & M_EXT) == 0) {
			m_freem(m_new);

no_mbuf:
			if (i != 0)
				goto refill;
			/*
			 * XXX set timer
			 */
			break;
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

		/* queue the mbufs allocated */
		mbufq_tail(&sc->xn_rx_batch, m_new);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (sc->rx_target/2)) {
		if (req_prod >sc->rx.sring->req_prod)
			goto push;
		return;
	}
	/* Adjust floating fill target if we risked running out of buffers. */
	if ( ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) &&
	     ((sc->rx_target *= 2) > sc->rx_max_target) )
		sc->rx_target = sc->rx_max_target;

refill:
	for (nr_flips = i = 0; ; i++) {
		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
			break;

		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
		    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

		id = xennet_rxidx(req_prod + i);

		KASSERT(sc->xn_cdata.xn_rx_chain[id] == NULL,
		    ("non-NULL xm_rx_chain"));
		sc->xn_cdata.xn_rx_chain[id] = m_new;

		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
		KASSERT((short)ref >= 0, ("negative ref"));
		sc->grant_rx_ref[id] = ref;

		vaddr = mtod(m_new, vm_offset_t);
		pfn = vtophys(vaddr) >> PAGE_SHIFT;
		req = RING_GET_REQUEST(&sc->rx, req_prod + i);

		if (sc->copying_receiver == 0) {
			gnttab_grant_foreign_transfer_ref(ref,
			    otherend_id, pfn);
			sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remove this page before passing
				 * back to Xen.
				 */
				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
				MULTI_update_va_mapping(&sc->rx_mcl[i],
				    vaddr, 0, 0);
			}
			nr_flips++;
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    otherend_id,
			    PFNTOMFN(pfn), 0);
		}
		req->id = id;
		req->gref = ref;

		sc->rx_pfn_array[i] =
		    vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT;
	}

	KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
	/*
	 * We may have allocated buffers which have entries outstanding
	 * in the page update queue -- make sure we flush those first!
	 */
	PT_UPDATES_FLUSH();
	if (nr_flips != 0) {
#ifdef notyet
		/* Tell the balloon driver what is going on. */
		balloon_update_driver_allowance(i);
#endif
		set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
		reservation.nr_extents   = i;
		reservation.extent_order = 0;
		reservation.address_bits = 0;
		reservation.domid        = DOMID_SELF;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {

			/* After all PTEs have been zapped, flush the TLB. */
			sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
			    UVMF_TLB_FLUSH|UVMF_ALL;

			/* Give away a batch of pages. */
			sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
			sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
			sc->rx_mcl[i].args[1] = (u_long)&reservation;
			/* Zap PTEs and give away pages in one big multicall. */
			(void)HYPERVISOR_multicall(sc->rx_mcl, i+1);

			/* Check return status of HYPERVISOR_dom_mem_op(). */
			if (unlikely(sc->rx_mcl[i].result != i))
				panic("Unable to reduce memory reservation\n");
		} else {
			if (HYPERVISOR_memory_op(
			    XENMEM_decrease_reservation, &reservation)
			    != i)
				panic("Unable to reduce memory "
				    "reservation\n");
		}
	} else {
		wmb();
	}

	/* Above is a suitable barrier to ensure backend will see requests. */
	sc->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
	if (notify)
		notify_remote_via_irq(sc->irq);
}

static void
xn_rxeof(struct netfront_info *np)
{
	struct ifnet *ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl *lro = &np->xn_lro;
	struct lro_entry *queued;
#endif
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	multicall_entry_t *mcl;
	struct mbuf *m;
	struct mbuf_head rxq, errq;
	int err, pages_flipped = 0, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(np);
		if (!netfront_carrier_ok(np))
			return;

		mbufq_init(&errq);
		mbufq_init(&rxq);

		ifp = np->xn_ifp;

		rp = np->rx.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = np->rx.rsp_cons;
		while ((i != rp)) {
			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xennet_get_responses(np, &rinfo, rp, &m,
			    &pages_flipped);

			if (unlikely(err)) {
				if (m)
					mbufq_tail(&errq, m);
				np->stats.rx_errors++;
				i = np->rx.rsp_cons;
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if ( rx->flags & NETRXF_data_validated ) {
				/* Tell the stack the checksums are okay */
				/*
				 * XXX this isn't necessarily the case - need to add
				 * check
				 */

				m->m_pkthdr.csum_flags |=
					(CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
					    | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}

			np->stats.rx_packets++;
			np->stats.rx_bytes += m->m_pkthdr.len;

			mbufq_tail(&rxq, m);
			np->rx.rsp_cons = ++i;
		}

		if (pages_flipped) {
			/* Some pages are no longer absent... */
#ifdef notyet
			balloon_update_driver_allowance(-pages_flipped);
#endif
			/* Do all the remapping work, and M->P updates, in one big
			 * hypercall.
			 */
			if (!!xen_feature(XENFEAT_auto_translated_physmap)) {
				mcl = np->rx_mcl + pages_flipped;
				mcl->op = __HYPERVISOR_mmu_update;
				mcl->args[0] = (u_long)np->rx_mmu;
				mcl->args[1] = pages_flipped;
				mcl->args[2] = 0;
				mcl->args[3] = DOMID_SELF;
				(void)HYPERVISOR_multicall(np->rx_mcl,
				    pages_flipped + 1);
			}
		}

		while ((m = mbufq_dequeue(&errq)))
			m_freem(m);

		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&rxq)) != NULL) {
			ifp->if_ipackets++;

			/*
			 * Do we really need to drop the rx lock?
			 */
			XN_RX_UNLOCK(np);
#if __FreeBSD_version >= 700000
			/* Use LRO if possible */
			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
				/*
				 * If LRO fails, pass up to the stack
				 * directly.
				 */
				(*ifp->if_input)(ifp, m);
			}
#else
			(*ifp->if_input)(ifp, m);
#endif
			XN_RX_LOCK(np);
		}

		np->rx.rsp_cons = i;

#if __FreeBSD_version >= 700000
		/*
		 * Flush any outstanding LRO work
		 */
		while (!SLIST_EMPTY(&lro->lro_active)) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif

#if 0
		/* If we get a callback with very few responses, reduce fill target. */
		/* NB. Note exponential increase, linear decrease. */
		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
		    ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
			np->rx_target = np->rx_min_target;
#endif

		network_alloc_rx_buffers(np);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
	} while (work_to_do);
}

static void
xn_txeof(struct netfront_info *np)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;

	XN_TX_LOCK_ASSERT(np);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;
	ifp->if_timer = 0;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&np->tx, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			id = txr->id;
			m = np->xn_cdata.xn_tx_chain[id];
			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
			M_ASSERTVALID(m);

			/*
			 * Increment packet count if this is the last
			 * mbuf of the chain.
			 */
			if (!m->m_next)
				ifp->if_opackets++;
			if (unlikely(gnttab_query_foreign_access(
			    np->grant_tx_ref[id]) != 0)) {
				printf("network_tx_buf_gc: warning "
				    "-- grant still in use by backend "
				    "domain.\n");
				goto out;
			}
			gnttab_end_foreign_access_ref(
			    np->grant_tx_ref[id]);
			gnttab_release_grant_reference(
			    &np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;

			np->xn_cdata.xn_tx_chain[id] = NULL;
			add_id_to_freelist(np->xn_cdata.xn_tx_chain, id);
			np->xn_cdata.xn_tx_chain_cnt--;
			if (np->xn_cdata.xn_tx_chain_cnt < 0) {
				panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0");
			}
			m_free(m);
			/* Only mark the queue active if we've freed up at least one slot to try */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		np->tx.rsp_cons = prod;
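
		/*
		 * rsp_event is the response index at which the backend's
		 * next completion notification is requested; parking it
		 * halfway between the current response and request
		 * producers batches completion interrupts.
		 */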
		/*
		 * Set a new event, then check for race with update of
		 * tx_cons.  Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending. Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		np->tx.sring->rsp_event =
		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;

		mb();
	} while (prod != np->tx.sring->rsp_prod);

 out:
	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
#if 0
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
#endif
	}

}

static void
xn_intr(void *xsc)
{
	struct netfront_info *np = xsc;
	struct ifnet *ifp = np->xn_ifp;

#if 0
	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
	    likely(netfront_carrier_ok(np)) &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
#endif
	if (np->tx.rsp_cons != np->tx.sring->rsp_prod) {
		XN_TX_LOCK(np);
		xn_txeof(np);
		XN_TX_UNLOCK(np);
	}

	XN_RX_LOCK(np);
	xn_rxeof(np);
	XN_RX_UNLOCK(np);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		xn_start(ifp);
}


static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
	grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
	np->rx_mbufs[new] = m;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

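/*
 * Consume the netif_extra_info trailer slots (e.g. GSO metadata) that
 * follow a response flagged NETRXF_extra_info.  Each trailer occupies a
 * ring slot of its own, so its mbuf and grant reference are immediately
 * recycled back onto the request ring via xennet_move_rx_slot().
 */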
119089e0f4d2SKip Macy WPRINTK("Missing extra info\n"); 119189e0f4d2SKip Macy #endif 119289e0f4d2SKip Macy err = -EINVAL; 119389e0f4d2SKip Macy break; 119489e0f4d2SKip Macy } 119589e0f4d2SKip Macy 119689e0f4d2SKip Macy extra = (struct netif_extra_info *) 119789e0f4d2SKip Macy RING_GET_RESPONSE(&np->rx, ++cons); 119889e0f4d2SKip Macy 119989e0f4d2SKip Macy if (unlikely(!extra->type || 120089e0f4d2SKip Macy extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 120189e0f4d2SKip Macy #if 0 120289e0f4d2SKip Macy if (net_ratelimit()) 120389e0f4d2SKip Macy WPRINTK("Invalid extra type: %d\n", 120489e0f4d2SKip Macy extra->type); 120589e0f4d2SKip Macy #endif 120689e0f4d2SKip Macy err = -EINVAL; 120789e0f4d2SKip Macy } else { 120889e0f4d2SKip Macy memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); 120989e0f4d2SKip Macy } 121089e0f4d2SKip Macy 121189e0f4d2SKip Macy m = xennet_get_rx_mbuf(np, cons); 121289e0f4d2SKip Macy ref = xennet_get_rx_ref(np, cons); 121389e0f4d2SKip Macy xennet_move_rx_slot(np, m, ref); 121489e0f4d2SKip Macy } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); 121589e0f4d2SKip Macy 121689e0f4d2SKip Macy np->rx.rsp_cons = cons; 121789e0f4d2SKip Macy return err; 121889e0f4d2SKip Macy } 121989e0f4d2SKip Macy 122089e0f4d2SKip Macy static int 122189e0f4d2SKip Macy xennet_get_responses(struct netfront_info *np, 122289e0f4d2SKip Macy struct netfront_rx_info *rinfo, RING_IDX rp, 122383b92f6eSKip Macy struct mbuf **list, 122489e0f4d2SKip Macy int *pages_flipped_p) 122589e0f4d2SKip Macy { 122689e0f4d2SKip Macy int pages_flipped = *pages_flipped_p; 122789e0f4d2SKip Macy struct mmu_update *mmu; 122889e0f4d2SKip Macy struct multicall_entry *mcl; 122989e0f4d2SKip Macy struct netif_rx_response *rx = &rinfo->rx; 123089e0f4d2SKip Macy struct netif_extra_info *extras = rinfo->extras; 123189e0f4d2SKip Macy RING_IDX cons = np->rx.rsp_cons; 123283b92f6eSKip Macy struct mbuf *m, *m0, *m_prev; 123389e0f4d2SKip Macy grant_ref_t ref = xennet_get_rx_ref(np, cons); 123483b92f6eSKip Macy int max = 5 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */; 123589e0f4d2SKip Macy int frags = 1; 123689e0f4d2SKip Macy int err = 0; 123789e0f4d2SKip Macy u_long ret; 123889e0f4d2SKip Macy 123983b92f6eSKip Macy m0 = m = m_prev = xennet_get_rx_mbuf(np, cons); 124083b92f6eSKip Macy 124183b92f6eSKip Macy 124289e0f4d2SKip Macy if (rx->flags & NETRXF_extra_info) { 124389e0f4d2SKip Macy err = xennet_get_extras(np, extras, rp); 124489e0f4d2SKip Macy cons = np->rx.rsp_cons; 124589e0f4d2SKip Macy } 124689e0f4d2SKip Macy 124783b92f6eSKip Macy 124883b92f6eSKip Macy if (m0 != NULL) { 124983b92f6eSKip Macy m0->m_pkthdr.len = 0; 125083b92f6eSKip Macy m0->m_next = NULL; 125183b92f6eSKip Macy } 125283b92f6eSKip Macy 125389e0f4d2SKip Macy for (;;) { 125489e0f4d2SKip Macy u_long mfn; 125589e0f4d2SKip Macy 125683b92f6eSKip Macy #if 0 125783b92f6eSKip Macy printf("rx->status=%hd rx->offset=%hu frags=%u\n", 125883b92f6eSKip Macy rx->status, rx->offset, frags); 125983b92f6eSKip Macy #endif 126089e0f4d2SKip Macy if (unlikely(rx->status < 0 || 126189e0f4d2SKip Macy rx->offset + rx->status > PAGE_SIZE)) { 126289e0f4d2SKip Macy #if 0 126389e0f4d2SKip Macy if (net_ratelimit()) 126489e0f4d2SKip Macy WPRINTK("rx->offset: %x, size: %u\n", 126589e0f4d2SKip Macy rx->offset, rx->status); 126689e0f4d2SKip Macy #endif 126789e0f4d2SKip Macy xennet_move_rx_slot(np, m, ref); 126889e0f4d2SKip Macy err = -EINVAL; 126989e0f4d2SKip Macy goto next; 127089e0f4d2SKip Macy } 127189e0f4d2SKip Macy 127289e0f4d2SKip Macy /* 127389e0f4d2SKip Macy * This definitely 
indicates a bug, either in this driver or in 127489e0f4d2SKip Macy * the backend driver. In the future this should flag the bad 127589e0f4d2SKip Macy * situation to the system controller to reboot the backend. 127689e0f4d2SKip Macy */ 127789e0f4d2SKip Macy if (ref == GRANT_INVALID_REF) { 127889e0f4d2SKip Macy #if 0 127989e0f4d2SKip Macy if (net_ratelimit()) 128089e0f4d2SKip Macy WPRINTK("Bad rx response id %d.\n", rx->id); 128189e0f4d2SKip Macy #endif 128289e0f4d2SKip Macy err = -EINVAL; 128389e0f4d2SKip Macy goto next; 128489e0f4d2SKip Macy } 128589e0f4d2SKip Macy 128689e0f4d2SKip Macy if (!np->copying_receiver) { 128789e0f4d2SKip Macy /* Memory pressure, insufficient buffer 128889e0f4d2SKip Macy * headroom, ... 128989e0f4d2SKip Macy */ 129089e0f4d2SKip Macy if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { 129189e0f4d2SKip Macy if (net_ratelimit()) 129289e0f4d2SKip Macy WPRINTK("Unfulfilled rx req " 129389e0f4d2SKip Macy "(id=%d, st=%d).\n", 129489e0f4d2SKip Macy rx->id, rx->status); 129589e0f4d2SKip Macy xennet_move_rx_slot(np, m, ref); 129689e0f4d2SKip Macy err = -ENOMEM; 129789e0f4d2SKip Macy goto next; 129889e0f4d2SKip Macy } 129989e0f4d2SKip Macy 130089e0f4d2SKip Macy if (!xen_feature( XENFEAT_auto_translated_physmap)) { 130189e0f4d2SKip Macy /* Remap the page. */ 130289e0f4d2SKip Macy void *vaddr = mtod(m, void *); 130389e0f4d2SKip Macy uint32_t pfn; 130489e0f4d2SKip Macy 130589e0f4d2SKip Macy mcl = np->rx_mcl + pages_flipped; 130689e0f4d2SKip Macy mmu = np->rx_mmu + pages_flipped; 130789e0f4d2SKip Macy 130889e0f4d2SKip Macy MULTI_update_va_mapping(mcl, (u_long)vaddr, 13096ae0e31bSKip Macy (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW | 131089e0f4d2SKip Macy PG_V | PG_M | PG_A, 0); 13113a6d1fcfSKip Macy pfn = (uintptr_t)m->m_ext.ext_arg1; 131289e0f4d2SKip Macy mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) | 131389e0f4d2SKip Macy MMU_MACHPHYS_UPDATE; 131489e0f4d2SKip Macy mmu->val = pfn; 131589e0f4d2SKip Macy 131689e0f4d2SKip Macy set_phys_to_machine(pfn, mfn); 131789e0f4d2SKip Macy } 131889e0f4d2SKip Macy pages_flipped++; 131989e0f4d2SKip Macy } else { 1320920ba15bSKip Macy ret = gnttab_end_foreign_access_ref(ref); 132189e0f4d2SKip Macy KASSERT(ret, ("ret != 0")); 132289e0f4d2SKip Macy } 132389e0f4d2SKip Macy 132489e0f4d2SKip Macy gnttab_release_grant_reference(&np->gref_rx_head, ref); 132589e0f4d2SKip Macy 132689e0f4d2SKip Macy next: 13273a539122SAdrian Chadd if (m == NULL) 13283a539122SAdrian Chadd break; 13293a539122SAdrian Chadd 133083b92f6eSKip Macy m->m_len = rx->status; 133183b92f6eSKip Macy m->m_data += rx->offset; 133283b92f6eSKip Macy m0->m_pkthdr.len += rx->status; 133383b92f6eSKip Macy 133489e0f4d2SKip Macy if (!(rx->flags & NETRXF_more_data)) 133589e0f4d2SKip Macy break; 133689e0f4d2SKip Macy 133789e0f4d2SKip Macy if (cons + frags == rp) { 133889e0f4d2SKip Macy if (net_ratelimit()) 133989e0f4d2SKip Macy WPRINTK("Need more frags\n"); 134089e0f4d2SKip Macy err = -ENOENT; 134189e0f4d2SKip Macy break; 134289e0f4d2SKip Macy } 134383b92f6eSKip Macy m_prev = m; 134489e0f4d2SKip Macy 134589e0f4d2SKip Macy rx = RING_GET_RESPONSE(&np->rx, cons + frags); 134689e0f4d2SKip Macy m = xennet_get_rx_mbuf(np, cons + frags); 134783b92f6eSKip Macy 134883b92f6eSKip Macy m_prev->m_next = m; 134983b92f6eSKip Macy m->m_next = NULL; 135089e0f4d2SKip Macy ref = xennet_get_rx_ref(np, cons + frags); 135189e0f4d2SKip Macy frags++; 135289e0f4d2SKip Macy } 135383b92f6eSKip Macy *list = m0; 135489e0f4d2SKip Macy 135589e0f4d2SKip Macy if (unlikely(frags > max)) { 135689e0f4d2SKip Macy if (net_ratelimit())
135789e0f4d2SKip Macy WPRINTK("Too many frags\n"); 135889e0f4d2SKip Macy err = -E2BIG; 135989e0f4d2SKip Macy } 136089e0f4d2SKip Macy 136189e0f4d2SKip Macy if (unlikely(err)) 136289e0f4d2SKip Macy np->rx.rsp_cons = cons + frags; 136389e0f4d2SKip Macy 136489e0f4d2SKip Macy *pages_flipped_p = pages_flipped; 136589e0f4d2SKip Macy 136689e0f4d2SKip Macy return err; 136789e0f4d2SKip Macy } 136889e0f4d2SKip Macy 136989e0f4d2SKip Macy static void 137089e0f4d2SKip Macy xn_tick_locked(struct netfront_info *sc) 137189e0f4d2SKip Macy { 137289e0f4d2SKip Macy XN_RX_LOCK_ASSERT(sc); 137389e0f4d2SKip Macy callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); 137489e0f4d2SKip Macy 137589e0f4d2SKip Macy /* XXX placeholder for printing debug information */ 137689e0f4d2SKip Macy 137789e0f4d2SKip Macy } 137889e0f4d2SKip Macy 137989e0f4d2SKip Macy 138089e0f4d2SKip Macy static void 138189e0f4d2SKip Macy xn_tick(void *xsc) 138289e0f4d2SKip Macy { 138389e0f4d2SKip Macy struct netfront_info *sc; 138489e0f4d2SKip Macy 138589e0f4d2SKip Macy sc = xsc; 138689e0f4d2SKip Macy XN_RX_LOCK(sc); 138789e0f4d2SKip Macy xn_tick_locked(sc); 138889e0f4d2SKip Macy XN_RX_UNLOCK(sc); 138989e0f4d2SKip Macy 139089e0f4d2SKip Macy } 139189e0f4d2SKip Macy static void 139289e0f4d2SKip Macy xn_start_locked(struct ifnet *ifp) 139389e0f4d2SKip Macy { 139423dc5621SKip Macy int otherend_id; 139589e0f4d2SKip Macy unsigned short id; 139612678024SDoug Rabson struct mbuf *m_head, *m; 139789e0f4d2SKip Macy struct netfront_info *sc; 139889e0f4d2SKip Macy netif_tx_request_t *tx; 139912678024SDoug Rabson netif_extra_info_t *extra; 140089e0f4d2SKip Macy RING_IDX i; 140189e0f4d2SKip Macy grant_ref_t ref; 140289e0f4d2SKip Macy u_long mfn, tx_bytes; 140312678024SDoug Rabson int notify, nfrags; 140489e0f4d2SKip Macy 140589e0f4d2SKip Macy sc = ifp->if_softc; 140623dc5621SKip Macy otherend_id = xenbus_get_otherend_id(sc->xbdev); 140789e0f4d2SKip Macy tx_bytes = 0; 140889e0f4d2SKip Macy 140989e0f4d2SKip Macy if (!netfront_carrier_ok(sc)) 141089e0f4d2SKip Macy return; 141189e0f4d2SKip Macy 141289e0f4d2SKip Macy for (i = sc->tx.req_prod_pvt; TRUE; i++) { 141389e0f4d2SKip Macy IF_DEQUEUE(&ifp->if_snd, m_head); 141489e0f4d2SKip Macy if (m_head == NULL) 141589e0f4d2SKip Macy break; 141689e0f4d2SKip Macy 1417c099cafaSAdrian Chadd /* 1418c099cafaSAdrian Chadd * netfront_tx_slot_available() tries to do some math to 1419c099cafaSAdrian Chadd * ensure that there'll be enough xenbus ring slots available 1420c099cafaSAdrian Chadd * for the maximum number of packet fragments (and a couple more 1421c099cafaSAdrian Chadd * for what I guess are TSO and other ring entry items.) 1422c099cafaSAdrian Chadd */ 142389e0f4d2SKip Macy if (!netfront_tx_slot_available(sc)) { 142489e0f4d2SKip Macy IF_PREPEND(&ifp->if_snd, m_head); 142589e0f4d2SKip Macy ifp->if_drv_flags |= IFF_DRV_OACTIVE; 142689e0f4d2SKip Macy break; 142789e0f4d2SKip Macy } 142889e0f4d2SKip Macy 142912678024SDoug Rabson /* 143012678024SDoug Rabson * Defragment the mbuf if necessary. 
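 * A packet may occupy at most MAX_SKB_FRAGS slots in the TX ring, so
 * longer mbuf chains are collapsed with m_defrag() before being queued.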
143112678024SDoug Rabson */ 143212678024SDoug Rabson for (m = m_head, nfrags = 0; m; m = m->m_next) 143312678024SDoug Rabson nfrags++; 143412678024SDoug Rabson if (nfrags > MAX_SKB_FRAGS) { 143512678024SDoug Rabson m = m_defrag(m_head, M_DONTWAIT); 143612678024SDoug Rabson if (!m) { 143712678024SDoug Rabson m_freem(m_head); 143812678024SDoug Rabson break; 143912678024SDoug Rabson } 144012678024SDoug Rabson m_head = m; 144112678024SDoug Rabson } 144289e0f4d2SKip Macy 1443a4ec37f5SAdrian Chadd /* Determine how many fragments now exist */ 1444a4ec37f5SAdrian Chadd for (m = m_head, nfrags = 0; m; m = m->m_next) 1445a4ec37f5SAdrian Chadd nfrags++; 1446a4ec37f5SAdrian Chadd 1447a4ec37f5SAdrian Chadd /* 14483fb28bbbSAdrian Chadd * Don't attempt to queue this packet if there aren't 14493fb28bbbSAdrian Chadd * enough free entries in the chain. 14503fb28bbbSAdrian Chadd * 14513fb28bbbSAdrian Chadd * There isn't a 1:1 correspondence between the mbuf TX ring 14523fb28bbbSAdrian Chadd * and the xenbus TX ring. 1453a4ec37f5SAdrian Chadd * xn_txeof() may need to be called to free up some slots. 1454a4ec37f5SAdrian Chadd * 14553fb28bbbSAdrian Chadd * It is quite possible that this can be later eliminated if 14563fb28bbbSAdrian Chadd * it turns out that partial packets can be pushed into 14573fb28bbbSAdrian Chadd * the ringbuffer, with fragments pushed in when further slots 1458a4ec37f5SAdrian Chadd * free up. 1459a4ec37f5SAdrian Chadd * 14603fb28bbbSAdrian Chadd * It is also quite possible that the driver will lock up 14613fb28bbbSAdrian Chadd * if the TX queue fills up with no RX traffic, and 14623fb28bbbSAdrian Chadd * the mbuf ring is exhausted. The queue may need 14633fb28bbbSAdrian Chadd * a swift kick to continue. 1464a4ec37f5SAdrian Chadd */ 1465a4ec37f5SAdrian Chadd 14663fb28bbbSAdrian Chadd /* 14673fb28bbbSAdrian Chadd * It is not +1 like the allocation because we need to keep 14683fb28bbbSAdrian Chadd * slot [0] free for the freelist head. 14693fb28bbbSAdrian Chadd */ 1470a4ec37f5SAdrian Chadd if (sc->xn_cdata.xn_tx_chain_cnt + nfrags >= NET_TX_RING_SIZE) { 1471a4ec37f5SAdrian Chadd printf("xn_start_locked: xn_tx_chain_cnt (%d) + nfrags %d >= NET_TX_RING_SIZE (%d); must be full!\n", 14723fb28bbbSAdrian Chadd (int) sc->xn_cdata.xn_tx_chain_cnt, 14733fb28bbbSAdrian Chadd (int) nfrags, (int) NET_TX_RING_SIZE); 1474a4ec37f5SAdrian Chadd IF_PREPEND(&ifp->if_snd, m_head); 1475a4ec37f5SAdrian Chadd ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1476a4ec37f5SAdrian Chadd break; 1477a4ec37f5SAdrian Chadd } 1478a4ec37f5SAdrian Chadd 14793fb28bbbSAdrian Chadd /* 14807c66482cSAdrian Chadd * Make sure there's actually space available in the 14817c66482cSAdrian Chadd * Xen TX ring for this. Overcompensate for the possibility 14827c66482cSAdrian Chadd * of having a TCP offload fragment just in case for now 14837c66482cSAdrian Chadd * (the +1) rather than adding logic to accurately calculate 14847c66482cSAdrian Chadd * the required size.
14853fb28bbbSAdrian Chadd */ 14867c66482cSAdrian Chadd if (RING_FREE_REQUESTS(&sc->tx) < (nfrags + 1)) { 14877c66482cSAdrian Chadd printf("xn_start_locked: free ring slots (%d) < (nfrags + 1) (%d); must be full!\n", 14887c66482cSAdrian Chadd (int) RING_FREE_REQUESTS(&sc->tx), 14897c66482cSAdrian Chadd (int) (nfrags + 1)); 14907c66482cSAdrian Chadd IF_PREPEND(&ifp->if_snd, m_head); 14917c66482cSAdrian Chadd ifp->if_drv_flags |= IFF_DRV_OACTIVE; 14927c66482cSAdrian Chadd break; 14937c66482cSAdrian Chadd } 1494a4ec37f5SAdrian Chadd 149589e0f4d2SKip Macy /* 149689e0f4d2SKip Macy * Start packing the mbufs in this chain into 149789e0f4d2SKip Macy * the fragment pointers. Stop when we run out 149889e0f4d2SKip Macy * of fragments or hit the end of the mbuf chain. 149989e0f4d2SKip Macy */ 150012678024SDoug Rabson m = m_head; 150112678024SDoug Rabson extra = NULL; 150212678024SDoug Rabson for (m = m_head; m; m = m->m_next) { 150389e0f4d2SKip Macy tx = RING_GET_REQUEST(&sc->tx, i); 150412678024SDoug Rabson id = get_id_from_freelist(sc->xn_cdata.xn_tx_chain); 1505a4ec37f5SAdrian Chadd if (id == 0) 1506a4ec37f5SAdrian Chadd panic("xn_start_locked: was allocated the freelist head!\n"); 1507a4ec37f5SAdrian Chadd sc->xn_cdata.xn_tx_chain_cnt++; 1508a4ec37f5SAdrian Chadd if (sc->xn_cdata.xn_tx_chain_cnt >= NET_TX_RING_SIZE+1) 1509a4ec37f5SAdrian Chadd panic("xn_start_locked: tx_chain_cnt must be < NET_TX_RING_SIZE+1\n"); 151012678024SDoug Rabson sc->xn_cdata.xn_tx_chain[id] = m; 151189e0f4d2SKip Macy tx->id = id; 151289e0f4d2SKip Macy ref = gnttab_claim_grant_reference(&sc->gref_tx_head); 151389e0f4d2SKip Macy KASSERT((short)ref >= 0, ("Negative ref")); 151412678024SDoug Rabson mfn = virt_to_mfn(mtod(m, vm_offset_t)); 151523dc5621SKip Macy gnttab_grant_foreign_access_ref(ref, otherend_id, 151689e0f4d2SKip Macy mfn, GNTMAP_readonly); 151789e0f4d2SKip Macy tx->gref = sc->grant_tx_ref[id] = ref; 151812678024SDoug Rabson tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1); 151989e0f4d2SKip Macy tx->flags = 0; 152012678024SDoug Rabson if (m == m_head) { 152112678024SDoug Rabson /* 152212678024SDoug Rabson * The first fragment has the entire packet 152312678024SDoug Rabson * size, subsequent fragments have just the 152412678024SDoug Rabson * fragment size. The backend works out the 152512678024SDoug Rabson * true size of the first fragment by 152612678024SDoug Rabson * subtracting the sizes of the other 152712678024SDoug Rabson * fragments. 152812678024SDoug Rabson */ 152912678024SDoug Rabson tx->size = m->m_pkthdr.len; 153089e0f4d2SKip Macy 153112678024SDoug Rabson /* 153212678024SDoug Rabson * The first fragment contains the 153312678024SDoug Rabson * checksum flags and is optionally 153412678024SDoug Rabson * followed by extra data for TSO etc. 
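 * (For TSO this is a XEN_NETIF_EXTRA_TYPE_GSO descriptor, which
 * consumes an additional request slot in the ring.)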
153512678024SDoug Rabson */ 153612678024SDoug Rabson if (m->m_pkthdr.csum_flags 153712678024SDoug Rabson & CSUM_DELAY_DATA) { 153812678024SDoug Rabson tx->flags |= (NETTXF_csum_blank 153912678024SDoug Rabson | NETTXF_data_validated); 154012678024SDoug Rabson } 154112678024SDoug Rabson #if __FreeBSD_version >= 700000 154212678024SDoug Rabson if (m->m_pkthdr.csum_flags & CSUM_TSO) { 154312678024SDoug Rabson struct netif_extra_info *gso = 154412678024SDoug Rabson (struct netif_extra_info *) 154512678024SDoug Rabson RING_GET_REQUEST(&sc->tx, ++i); 154689e0f4d2SKip Macy 154712678024SDoug Rabson if (extra) 154812678024SDoug Rabson extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; 154912678024SDoug Rabson else 155012678024SDoug Rabson tx->flags |= NETTXF_extra_info; 155189e0f4d2SKip Macy 155212678024SDoug Rabson gso->u.gso.size = m->m_pkthdr.tso_segsz; 155312678024SDoug Rabson gso->u.gso.type = 155412678024SDoug Rabson XEN_NETIF_GSO_TYPE_TCPV4; 155512678024SDoug Rabson gso->u.gso.pad = 0; 155612678024SDoug Rabson gso->u.gso.features = 0; 155712678024SDoug Rabson 155812678024SDoug Rabson gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 155912678024SDoug Rabson gso->flags = 0; 156012678024SDoug Rabson extra = gso; 156112678024SDoug Rabson } 156212678024SDoug Rabson #endif 156312678024SDoug Rabson } else { 156412678024SDoug Rabson tx->size = m->m_len; 156512678024SDoug Rabson } 156612678024SDoug Rabson if (m->m_next) { 156712678024SDoug Rabson tx->flags |= NETTXF_more_data; 156812678024SDoug Rabson i++; 156912678024SDoug Rabson } 157012678024SDoug Rabson } 157112678024SDoug Rabson 157212678024SDoug Rabson BPF_MTAP(ifp, m_head); 157312678024SDoug Rabson 157412678024SDoug Rabson sc->stats.tx_bytes += m_head->m_pkthdr.len; 157589e0f4d2SKip Macy sc->stats.tx_packets++; 157689e0f4d2SKip Macy } 157789e0f4d2SKip Macy 157889e0f4d2SKip Macy sc->tx.req_prod_pvt = i; 157989e0f4d2SKip Macy RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify); 158089e0f4d2SKip Macy if (notify) 158189e0f4d2SKip Macy notify_remote_via_irq(sc->irq); 158289e0f4d2SKip Macy 158389e0f4d2SKip Macy xn_txeof(sc); 158489e0f4d2SKip Macy 158589e0f4d2SKip Macy if (RING_FULL(&sc->tx)) { 158689e0f4d2SKip Macy sc->tx_full = 1; 158789e0f4d2SKip Macy #if 0 158889e0f4d2SKip Macy netif_stop_queue(dev); 158989e0f4d2SKip Macy #endif 159089e0f4d2SKip Macy } 159189e0f4d2SKip Macy 159289e0f4d2SKip Macy return; 159389e0f4d2SKip Macy } 159489e0f4d2SKip Macy 159589e0f4d2SKip Macy static void 159689e0f4d2SKip Macy xn_start(struct ifnet *ifp) 159789e0f4d2SKip Macy { 159889e0f4d2SKip Macy struct netfront_info *sc; 159989e0f4d2SKip Macy sc = ifp->if_softc; 160089e0f4d2SKip Macy XN_TX_LOCK(sc); 160189e0f4d2SKip Macy xn_start_locked(ifp); 160289e0f4d2SKip Macy XN_TX_UNLOCK(sc); 160389e0f4d2SKip Macy } 160489e0f4d2SKip Macy 160589e0f4d2SKip Macy /* equivalent of network_open() in Linux */ 160689e0f4d2SKip Macy static void 160789e0f4d2SKip Macy xn_ifinit_locked(struct netfront_info *sc) 160889e0f4d2SKip Macy { 160989e0f4d2SKip Macy struct ifnet *ifp; 161089e0f4d2SKip Macy 161189e0f4d2SKip Macy XN_LOCK_ASSERT(sc); 161289e0f4d2SKip Macy 161389e0f4d2SKip Macy ifp = sc->xn_ifp; 161489e0f4d2SKip Macy 161589e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING) 161689e0f4d2SKip Macy return; 161789e0f4d2SKip Macy 161889e0f4d2SKip Macy xn_stop(sc); 161989e0f4d2SKip Macy 162089e0f4d2SKip Macy network_alloc_rx_buffers(sc); 162189e0f4d2SKip Macy sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1; 162289e0f4d2SKip Macy 162389e0f4d2SKip Macy ifp->if_drv_flags |= IFF_DRV_RUNNING; 162489e0f4d2SKip 
Macy ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 162589e0f4d2SKip Macy 162689e0f4d2SKip Macy callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); 162789e0f4d2SKip Macy 162889e0f4d2SKip Macy } 162989e0f4d2SKip Macy 163089e0f4d2SKip Macy 163189e0f4d2SKip Macy static void 163289e0f4d2SKip Macy xn_ifinit(void *xsc) 163389e0f4d2SKip Macy { 163489e0f4d2SKip Macy struct netfront_info *sc = xsc; 163589e0f4d2SKip Macy 163689e0f4d2SKip Macy XN_LOCK(sc); 163789e0f4d2SKip Macy xn_ifinit_locked(sc); 163889e0f4d2SKip Macy XN_UNLOCK(sc); 163989e0f4d2SKip Macy 164089e0f4d2SKip Macy } 164189e0f4d2SKip Macy 164289e0f4d2SKip Macy 164389e0f4d2SKip Macy static int 164489e0f4d2SKip Macy xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 164589e0f4d2SKip Macy { 164689e0f4d2SKip Macy struct netfront_info *sc = ifp->if_softc; 164789e0f4d2SKip Macy struct ifreq *ifr = (struct ifreq *) data; 164889e0f4d2SKip Macy struct ifaddr *ifa = (struct ifaddr *)data; 164989e0f4d2SKip Macy 165089e0f4d2SKip Macy int mask, error = 0; 165189e0f4d2SKip Macy switch(cmd) { 165289e0f4d2SKip Macy case SIOCSIFADDR: 165389e0f4d2SKip Macy case SIOCGIFADDR: 165489e0f4d2SKip Macy XN_LOCK(sc); 165589e0f4d2SKip Macy if (ifa->ifa_addr->sa_family == AF_INET) { 165689e0f4d2SKip Macy ifp->if_flags |= IFF_UP; 165789e0f4d2SKip Macy if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 165889e0f4d2SKip Macy xn_ifinit_locked(sc); 165989e0f4d2SKip Macy arp_ifinit(ifp, ifa); 166089e0f4d2SKip Macy XN_UNLOCK(sc); 166149906218SDoug Rabson } else { 166249906218SDoug Rabson XN_UNLOCK(sc); 166349906218SDoug Rabson error = ether_ioctl(ifp, cmd, data); 166449906218SDoug Rabson } 166589e0f4d2SKip Macy break; 166689e0f4d2SKip Macy case SIOCSIFMTU: 166789e0f4d2SKip Macy /* XXX can we alter the MTU on a VN ?*/ 166889e0f4d2SKip Macy #ifdef notyet 166989e0f4d2SKip Macy if (ifr->ifr_mtu > XN_JUMBO_MTU) 167089e0f4d2SKip Macy error = EINVAL; 167189e0f4d2SKip Macy else 167289e0f4d2SKip Macy #endif 167389e0f4d2SKip Macy { 167489e0f4d2SKip Macy ifp->if_mtu = ifr->ifr_mtu; 167589e0f4d2SKip Macy ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 167689e0f4d2SKip Macy xn_ifinit(sc); 167789e0f4d2SKip Macy } 167889e0f4d2SKip Macy break; 167989e0f4d2SKip Macy case SIOCSIFFLAGS: 168089e0f4d2SKip Macy XN_LOCK(sc); 168189e0f4d2SKip Macy if (ifp->if_flags & IFF_UP) { 168289e0f4d2SKip Macy /* 168389e0f4d2SKip Macy * If only the state of the PROMISC flag changed, 168489e0f4d2SKip Macy * then just use the 'set promisc mode' command 168589e0f4d2SKip Macy * instead of reinitializing the entire NIC. Doing 168689e0f4d2SKip Macy * a full re-init means reloading the firmware and 168789e0f4d2SKip Macy * waiting for it to start up, which may take a 168889e0f4d2SKip Macy * second or two. 
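 * (The Xen virtual interface has no firmware to reload; the
 * promiscuous-mode handling below is disabled under 'notyet' in any case.)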
168989e0f4d2SKip Macy */ 169089e0f4d2SKip Macy #ifdef notyet 169189e0f4d2SKip Macy /* No promiscuous mode with Xen */ 169289e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING && 169389e0f4d2SKip Macy ifp->if_flags & IFF_PROMISC && 169489e0f4d2SKip Macy !(sc->xn_if_flags & IFF_PROMISC)) { 169589e0f4d2SKip Macy XN_SETBIT(sc, XN_RX_MODE, 169689e0f4d2SKip Macy XN_RXMODE_RX_PROMISC); 169789e0f4d2SKip Macy } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 169889e0f4d2SKip Macy !(ifp->if_flags & IFF_PROMISC) && 169989e0f4d2SKip Macy sc->xn_if_flags & IFF_PROMISC) { 170089e0f4d2SKip Macy XN_CLRBIT(sc, XN_RX_MODE, 170189e0f4d2SKip Macy XN_RXMODE_RX_PROMISC); 170289e0f4d2SKip Macy } else 170389e0f4d2SKip Macy #endif 170489e0f4d2SKip Macy xn_ifinit_locked(sc); 170589e0f4d2SKip Macy } else { 170689e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 170789e0f4d2SKip Macy xn_stop(sc); 170889e0f4d2SKip Macy } 170989e0f4d2SKip Macy } 171089e0f4d2SKip Macy sc->xn_if_flags = ifp->if_flags; 171189e0f4d2SKip Macy XN_UNLOCK(sc); 171289e0f4d2SKip Macy error = 0; 171389e0f4d2SKip Macy break; 171489e0f4d2SKip Macy case SIOCSIFCAP: 171589e0f4d2SKip Macy mask = ifr->ifr_reqcap ^ ifp->if_capenable; 171612678024SDoug Rabson if (mask & IFCAP_TXCSUM) { 171712678024SDoug Rabson if (IFCAP_TXCSUM & ifp->if_capenable) { 171812678024SDoug Rabson ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); 171912678024SDoug Rabson ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP 172012678024SDoug Rabson | CSUM_IP | CSUM_TSO); 172112678024SDoug Rabson } else { 172212678024SDoug Rabson ifp->if_capenable |= IFCAP_TXCSUM; 172312678024SDoug Rabson ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP 172412678024SDoug Rabson | CSUM_IP); 172589e0f4d2SKip Macy } 172612678024SDoug Rabson } 172712678024SDoug Rabson if (mask & IFCAP_RXCSUM) { 172812678024SDoug Rabson ifp->if_capenable ^= IFCAP_RXCSUM; 172912678024SDoug Rabson } 173012678024SDoug Rabson #if __FreeBSD_version >= 700000 173112678024SDoug Rabson if (mask & IFCAP_TSO4) { 173212678024SDoug Rabson if (IFCAP_TSO4 & ifp->if_capenable) { 173312678024SDoug Rabson ifp->if_capenable &= ~IFCAP_TSO4; 173412678024SDoug Rabson ifp->if_hwassist &= ~CSUM_TSO; 173512678024SDoug Rabson } else if (IFCAP_TXCSUM & ifp->if_capenable) { 173612678024SDoug Rabson ifp->if_capenable |= IFCAP_TSO4; 173712678024SDoug Rabson ifp->if_hwassist |= CSUM_TSO; 173812678024SDoug Rabson } else { 17393552092bSAdrian Chadd IPRINTK("Xen requires tx checksum offload" 174012678024SDoug Rabson " be enabled to use TSO\n"); 174112678024SDoug Rabson error = EINVAL; 174212678024SDoug Rabson } 174312678024SDoug Rabson } 174412678024SDoug Rabson if (mask & IFCAP_LRO) { 174512678024SDoug Rabson ifp->if_capenable ^= IFCAP_LRO; 174612678024SDoug Rabson 174712678024SDoug Rabson } 174812678024SDoug Rabson #endif 174989e0f4d2SKip Macy error = 0; 175089e0f4d2SKip Macy break; 175189e0f4d2SKip Macy case SIOCADDMULTI: 175289e0f4d2SKip Macy case SIOCDELMULTI: 175389e0f4d2SKip Macy #ifdef notyet 175489e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 175589e0f4d2SKip Macy XN_LOCK(sc); 175689e0f4d2SKip Macy xn_setmulti(sc); 175789e0f4d2SKip Macy XN_UNLOCK(sc); 175889e0f4d2SKip Macy error = 0; 175989e0f4d2SKip Macy } 176089e0f4d2SKip Macy #endif 176189e0f4d2SKip Macy /* FALLTHROUGH */ 176289e0f4d2SKip Macy case SIOCSIFMEDIA: 176389e0f4d2SKip Macy case SIOCGIFMEDIA: 176489e0f4d2SKip Macy error = EINVAL; 176589e0f4d2SKip Macy break; 176689e0f4d2SKip Macy default: 176789e0f4d2SKip Macy error = ether_ioctl(ifp, cmd, data); 176889e0f4d2SKip Macy } 
176989e0f4d2SKip Macy 177089e0f4d2SKip Macy return (error); 177189e0f4d2SKip Macy } 177289e0f4d2SKip Macy 177389e0f4d2SKip Macy static void 177489e0f4d2SKip Macy xn_stop(struct netfront_info *sc) 177589e0f4d2SKip Macy { 177689e0f4d2SKip Macy struct ifnet *ifp; 177789e0f4d2SKip Macy 177889e0f4d2SKip Macy XN_LOCK_ASSERT(sc); 177989e0f4d2SKip Macy 178089e0f4d2SKip Macy ifp = sc->xn_ifp; 178189e0f4d2SKip Macy 178289e0f4d2SKip Macy callout_stop(&sc->xn_stat_ch); 178389e0f4d2SKip Macy 178489e0f4d2SKip Macy xn_free_rx_ring(sc); 178589e0f4d2SKip Macy xn_free_tx_ring(sc); 178689e0f4d2SKip Macy 178789e0f4d2SKip Macy ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 178889e0f4d2SKip Macy } 178989e0f4d2SKip Macy 179089e0f4d2SKip Macy /* START of Xenolinux helper functions adapted to FreeBSD */ 179123dc5621SKip Macy int 179223dc5621SKip Macy network_connect(struct netfront_info *np) 179389e0f4d2SKip Macy { 17943a6d1fcfSKip Macy int i, requeue_idx, error; 179589e0f4d2SKip Macy grant_ref_t ref; 179689e0f4d2SKip Macy netif_rx_request_t *req; 179789e0f4d2SKip Macy u_int feature_rx_copy, feature_rx_flip; 179889e0f4d2SKip Macy 17993a6d1fcfSKip Macy error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev), 18003a6d1fcfSKip Macy "feature-rx-copy", NULL, "%u", &feature_rx_copy); 18013a6d1fcfSKip Macy if (error) 180289e0f4d2SKip Macy feature_rx_copy = 0; 18033a6d1fcfSKip Macy error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev), 18043a6d1fcfSKip Macy "feature-rx-flip", NULL, "%u", &feature_rx_flip); 18053a6d1fcfSKip Macy if (error) 180689e0f4d2SKip Macy feature_rx_flip = 1; 180789e0f4d2SKip Macy 180889e0f4d2SKip Macy /* 180989e0f4d2SKip Macy * Copy packets on receive path if: 181089e0f4d2SKip Macy * (a) This was requested by user, and the backend supports it; or 181189e0f4d2SKip Macy * (b) Flipping was requested, but this is unsupported by the backend. 181289e0f4d2SKip Macy */ 181389e0f4d2SKip Macy np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || 181489e0f4d2SKip Macy (MODPARM_rx_flip && !feature_rx_flip)); 181589e0f4d2SKip Macy 181689e0f4d2SKip Macy XN_LOCK(np); 181789e0f4d2SKip Macy /* Recovery procedure: */ 18183a6d1fcfSKip Macy error = talk_to_backend(np->xbdev, np); 18193a6d1fcfSKip Macy if (error) 18203a6d1fcfSKip Macy return (error); 182189e0f4d2SKip Macy 182289e0f4d2SKip Macy /* Step 1: Reinitialise variables. */ 182389e0f4d2SKip Macy netif_release_tx_bufs(np); 182489e0f4d2SKip Macy 182589e0f4d2SKip Macy /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. 
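 * Surviving RX mbufs are compacted to the front of the ring and
 * re-granted to the backend: as page transfers when flipping, or as
 * access grants when copying_receiver is set.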
*/ 182689e0f4d2SKip Macy for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 182789e0f4d2SKip Macy struct mbuf *m; 18283a6d1fcfSKip Macy u_long pfn; 182989e0f4d2SKip Macy 183089e0f4d2SKip Macy if (np->rx_mbufs[i] == NULL) 183189e0f4d2SKip Macy continue; 183289e0f4d2SKip Macy 183389e0f4d2SKip Macy m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i); 183489e0f4d2SKip Macy ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); 183589e0f4d2SKip Macy req = RING_GET_REQUEST(&np->rx, requeue_idx); 18363a6d1fcfSKip Macy pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; 183789e0f4d2SKip Macy 183889e0f4d2SKip Macy if (!np->copying_receiver) { 183989e0f4d2SKip Macy gnttab_grant_foreign_transfer_ref(ref, 184023dc5621SKip Macy xenbus_get_otherend_id(np->xbdev), 18413a6d1fcfSKip Macy pfn); 184289e0f4d2SKip Macy } else { 184389e0f4d2SKip Macy gnttab_grant_foreign_access_ref(ref, 184423dc5621SKip Macy xenbus_get_otherend_id(np->xbdev), 18453a6d1fcfSKip Macy PFNTOMFN(pfn), 0); 184689e0f4d2SKip Macy } 184789e0f4d2SKip Macy req->gref = ref; 184889e0f4d2SKip Macy req->id = requeue_idx; 184989e0f4d2SKip Macy 185089e0f4d2SKip Macy requeue_idx++; 185189e0f4d2SKip Macy } 185289e0f4d2SKip Macy 185389e0f4d2SKip Macy np->rx.req_prod_pvt = requeue_idx; 185489e0f4d2SKip Macy 185589e0f4d2SKip Macy /* Step 3: All public and private state should now be sane. Get 185689e0f4d2SKip Macy * ready to start sending and receiving packets and give the driver 185789e0f4d2SKip Macy * domain a kick because we've probably just requeued some 185889e0f4d2SKip Macy * packets. 185989e0f4d2SKip Macy */ 186089e0f4d2SKip Macy netfront_carrier_on(np); 186189e0f4d2SKip Macy notify_remote_via_irq(np->irq); 186289e0f4d2SKip Macy XN_TX_LOCK(np); 186389e0f4d2SKip Macy xn_txeof(np); 186489e0f4d2SKip Macy XN_TX_UNLOCK(np); 186589e0f4d2SKip Macy network_alloc_rx_buffers(np); 186689e0f4d2SKip Macy XN_UNLOCK(np); 186789e0f4d2SKip Macy 186889e0f4d2SKip Macy return (0); 186989e0f4d2SKip Macy } 187089e0f4d2SKip Macy 187189e0f4d2SKip Macy static void 187289e0f4d2SKip Macy show_device(struct netfront_info *sc) 187389e0f4d2SKip Macy { 187489e0f4d2SKip Macy #ifdef DEBUG 187589e0f4d2SKip Macy if (sc) { 187689e0f4d2SKip Macy IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n", 187789e0f4d2SKip Macy sc->xn_ifno, 187889e0f4d2SKip Macy be_state_name[sc->xn_backend_state], 187989e0f4d2SKip Macy sc->xn_user_state ? "open" : "closed", 188089e0f4d2SKip Macy sc->xn_evtchn, 188189e0f4d2SKip Macy sc->xn_irq, 188289e0f4d2SKip Macy sc->xn_tx_if, 188389e0f4d2SKip Macy sc->xn_rx_if); 188489e0f4d2SKip Macy } else { 188589e0f4d2SKip Macy IPRINTK("<vif NULL>\n"); 188689e0f4d2SKip Macy } 188789e0f4d2SKip Macy #endif 188889e0f4d2SKip Macy } 188989e0f4d2SKip Macy 189089e0f4d2SKip Macy /** Create a network device. 
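 * Allocates grant references for the TX and RX rings, reads the MAC
 * address via xenbus, and attaches the ifnet.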
189189e0f4d2SKip Macy * @param handle device handle 189289e0f4d2SKip Macy */ 189323dc5621SKip Macy int 189423dc5621SKip Macy create_netdev(device_t dev) 189589e0f4d2SKip Macy { 189689e0f4d2SKip Macy int i; 189789e0f4d2SKip Macy struct netfront_info *np; 189889e0f4d2SKip Macy int err; 189989e0f4d2SKip Macy struct ifnet *ifp; 190089e0f4d2SKip Macy 190123dc5621SKip Macy np = device_get_softc(dev); 190289e0f4d2SKip Macy 190389e0f4d2SKip Macy np->xbdev = dev; 190489e0f4d2SKip Macy 190589e0f4d2SKip Macy XN_LOCK_INIT(np, xennetif); 190689e0f4d2SKip Macy np->rx_target = RX_MIN_TARGET; 190789e0f4d2SKip Macy np->rx_min_target = RX_MIN_TARGET; 190889e0f4d2SKip Macy np->rx_max_target = RX_MAX_TARGET; 190989e0f4d2SKip Macy 191089e0f4d2SKip Macy /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */ 191189e0f4d2SKip Macy for (i = 0; i <= NET_TX_RING_SIZE; i++) { 191289e0f4d2SKip Macy np->tx_mbufs[i] = (void *) ((u_long) i+1); 191389e0f4d2SKip Macy np->grant_tx_ref[i] = GRANT_INVALID_REF; 191489e0f4d2SKip Macy } 191589e0f4d2SKip Macy for (i = 0; i <= NET_RX_RING_SIZE; i++) { 191689e0f4d2SKip Macy np->rx_mbufs[i] = NULL; 191789e0f4d2SKip Macy np->grant_rx_ref[i] = GRANT_INVALID_REF; 191889e0f4d2SKip Macy } 191989e0f4d2SKip Macy /* A grant for every tx ring slot */ 192089e0f4d2SKip Macy if (gnttab_alloc_grant_references(TX_MAX_TARGET, 192189e0f4d2SKip Macy &np->gref_tx_head) < 0) { 192289e0f4d2SKip Macy printf("#### netfront can't alloc tx grant refs\n"); 192389e0f4d2SKip Macy err = ENOMEM; 192489e0f4d2SKip Macy goto exit; 192589e0f4d2SKip Macy } 192689e0f4d2SKip Macy /* A grant for every rx ring slot */ 192789e0f4d2SKip Macy if (gnttab_alloc_grant_references(RX_MAX_TARGET, 192889e0f4d2SKip Macy &np->gref_rx_head) < 0) { 192989e0f4d2SKip Macy printf("#### netfront can't alloc rx grant refs\n"); 193089e0f4d2SKip Macy gnttab_free_grant_references(np->gref_tx_head); 193189e0f4d2SKip Macy err = ENOMEM; 193289e0f4d2SKip Macy goto exit; 193389e0f4d2SKip Macy } 193489e0f4d2SKip Macy 193589e0f4d2SKip Macy err = xen_net_read_mac(dev, np->mac); 193689e0f4d2SKip Macy if (err) { 193723dc5621SKip Macy xenbus_dev_fatal(dev, err, "parsing %s/mac", 193823dc5621SKip Macy xenbus_get_node(dev)); 193989e0f4d2SKip Macy goto out; 194089e0f4d2SKip Macy } 194189e0f4d2SKip Macy 194289e0f4d2SKip Macy /* Set up ifnet structure */ 194323dc5621SKip Macy ifp = np->xn_ifp = if_alloc(IFT_ETHER); 194489e0f4d2SKip Macy ifp->if_softc = np; 194523dc5621SKip Macy if_initname(ifp, "xn", device_get_unit(dev)); 19463a6d1fcfSKip Macy ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 194789e0f4d2SKip Macy ifp->if_ioctl = xn_ioctl; 194889e0f4d2SKip Macy ifp->if_output = ether_output; 194989e0f4d2SKip Macy ifp->if_start = xn_start; 195089e0f4d2SKip Macy #ifdef notyet 195189e0f4d2SKip Macy ifp->if_watchdog = xn_watchdog; 195289e0f4d2SKip Macy #endif 195389e0f4d2SKip Macy ifp->if_init = xn_ifinit; 195489e0f4d2SKip Macy ifp->if_mtu = ETHERMTU; 195589e0f4d2SKip Macy ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1; 195689e0f4d2SKip Macy 195789e0f4d2SKip Macy ifp->if_hwassist = XN_CSUM_FEATURES; 195889e0f4d2SKip Macy ifp->if_capabilities = IFCAP_HWCSUM; 195912678024SDoug Rabson #if __FreeBSD_version >= 700000 196012678024SDoug Rabson ifp->if_capabilities |= IFCAP_TSO4; 196112678024SDoug Rabson if (xn_enable_lro) { 196212678024SDoug Rabson int err = tcp_lro_init(&np->xn_lro); 196312678024SDoug Rabson if (err) { 196412678024SDoug Rabson device_printf(dev, "LRO initialization failed\n"); 196512678024SDoug Rabson goto exit; 
196612678024SDoug Rabson } 196712678024SDoug Rabson np->xn_lro.ifp = ifp; 196812678024SDoug Rabson ifp->if_capabilities |= IFCAP_LRO; 196912678024SDoug Rabson } 197089e0f4d2SKip Macy #endif 197112678024SDoug Rabson ifp->if_capenable = ifp->if_capabilities; 197289e0f4d2SKip Macy 197389e0f4d2SKip Macy ether_ifattach(ifp, np->mac); 197489e0f4d2SKip Macy callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE); 197589e0f4d2SKip Macy netfront_carrier_off(np); 197689e0f4d2SKip Macy 197789e0f4d2SKip Macy return (0); 197889e0f4d2SKip Macy 197989e0f4d2SKip Macy exit: 198089e0f4d2SKip Macy gnttab_free_grant_references(np->gref_tx_head); 198189e0f4d2SKip Macy out: 198289e0f4d2SKip Macy panic("do something smart"); 198389e0f4d2SKip Macy 198489e0f4d2SKip Macy } 198589e0f4d2SKip Macy 198689e0f4d2SKip Macy /** 198789e0f4d2SKip Macy * Handle the change of state of the backend to Closing. We must delete our 198889e0f4d2SKip Macy * device-layer structures now, to ensure that writes are flushed through to 198989e0f4d2SKip Macy * the backend. Once this is done, we can switch to Closed in 199089e0f4d2SKip Macy * acknowledgement. 199189e0f4d2SKip Macy */ 199289e0f4d2SKip Macy #if 0 199323dc5621SKip Macy static void netfront_closing(device_t dev) 199489e0f4d2SKip Macy { 199589e0f4d2SKip Macy #if 0 199689e0f4d2SKip Macy struct netfront_info *info = dev->dev_driver_data; 199789e0f4d2SKip Macy 199889e0f4d2SKip Macy DPRINTK("netfront_closing: %s removed\n", dev->nodename); 199989e0f4d2SKip Macy 200089e0f4d2SKip Macy close_netdev(info); 200189e0f4d2SKip Macy #endif 200289e0f4d2SKip Macy xenbus_switch_state(dev, XenbusStateClosed); 200389e0f4d2SKip Macy } 200489e0f4d2SKip Macy #endif 200589e0f4d2SKip Macy 200623dc5621SKip Macy static int netfront_detach(device_t dev) 200789e0f4d2SKip Macy { 200823dc5621SKip Macy struct netfront_info *info = device_get_softc(dev); 200989e0f4d2SKip Macy 201023dc5621SKip Macy DPRINTK("%s\n", xenbus_get_node(dev)); 201189e0f4d2SKip Macy 201289e0f4d2SKip Macy netif_free(info); 201389e0f4d2SKip Macy 201489e0f4d2SKip Macy return 0; 201589e0f4d2SKip Macy } 201689e0f4d2SKip Macy 201789e0f4d2SKip Macy 201889e0f4d2SKip Macy static void netif_free(struct netfront_info *info) 201989e0f4d2SKip Macy { 202089e0f4d2SKip Macy netif_disconnect_backend(info); 202189e0f4d2SKip Macy #if 0 202289e0f4d2SKip Macy close_netdev(info); 202389e0f4d2SKip Macy #endif 202489e0f4d2SKip Macy } 202589e0f4d2SKip Macy 202689e0f4d2SKip Macy static void netif_disconnect_backend(struct netfront_info *info) 202789e0f4d2SKip Macy { 20283a6d1fcfSKip Macy XN_RX_LOCK(info); 20293a6d1fcfSKip Macy XN_TX_LOCK(info); 20303a6d1fcfSKip Macy netfront_carrier_off(info); 20313a6d1fcfSKip Macy XN_TX_UNLOCK(info); 20323a6d1fcfSKip Macy XN_RX_UNLOCK(info); 20333a6d1fcfSKip Macy 203489e0f4d2SKip Macy end_access(info->tx_ring_ref, info->tx.sring); 203589e0f4d2SKip Macy end_access(info->rx_ring_ref, info->rx.sring); 203689e0f4d2SKip Macy info->tx_ring_ref = GRANT_INVALID_REF; 203789e0f4d2SKip Macy info->rx_ring_ref = GRANT_INVALID_REF; 203889e0f4d2SKip Macy info->tx.sring = NULL; 203989e0f4d2SKip Macy info->rx.sring = NULL; 204089e0f4d2SKip Macy 204189e0f4d2SKip Macy if (info->irq) 20423a6d1fcfSKip Macy unbind_from_irqhandler(info->irq); 20433a6d1fcfSKip Macy 204489e0f4d2SKip Macy info->irq = 0; 204589e0f4d2SKip Macy } 204689e0f4d2SKip Macy 204789e0f4d2SKip Macy 204889e0f4d2SKip Macy static void end_access(int ref, void *page) 204989e0f4d2SKip Macy { 205089e0f4d2SKip Macy if (ref != GRANT_INVALID_REF) 2051920ba15bSKip Macy gnttab_end_foreign_access(ref,
page); 205289e0f4d2SKip Macy } 205389e0f4d2SKip Macy 205489e0f4d2SKip Macy /* ** Driver registration ** */ 205523dc5621SKip Macy static device_method_t netfront_methods[] = { 205623dc5621SKip Macy /* Device interface */ 205723dc5621SKip Macy DEVMETHOD(device_probe, netfront_probe), 205823dc5621SKip Macy DEVMETHOD(device_attach, netfront_attach), 205923dc5621SKip Macy DEVMETHOD(device_detach, netfront_detach), 206023dc5621SKip Macy DEVMETHOD(device_shutdown, bus_generic_shutdown), 206123dc5621SKip Macy DEVMETHOD(device_suspend, bus_generic_suspend), 206223dc5621SKip Macy DEVMETHOD(device_resume, netfront_resume), 206389e0f4d2SKip Macy 206423dc5621SKip Macy /* Xenbus interface */ 206523dc5621SKip Macy DEVMETHOD(xenbus_backend_changed, netfront_backend_changed), 206689e0f4d2SKip Macy 206723dc5621SKip Macy { 0, 0 } 206889e0f4d2SKip Macy }; 206989e0f4d2SKip Macy 207023dc5621SKip Macy static driver_t netfront_driver = { 207123dc5621SKip Macy "xn", 207223dc5621SKip Macy netfront_methods, 207323dc5621SKip Macy sizeof(struct netfront_info), 207489e0f4d2SKip Macy }; 207523dc5621SKip Macy devclass_t netfront_devclass; 207689e0f4d2SKip Macy 207723dc5621SKip Macy DRIVER_MODULE(xe, xenbus, netfront_driver, netfront_devclass, 0, 0); 2078